"""
analyzer.py — Rule-based scoring engine and recommendation generator.

No LLM API calls. Pure deterministic scoring from scraped data.
"""

from typing import Tuple


# ---------------------------------------------------------------------------
# Score weights for overall AI Score
# ---------------------------------------------------------------------------
# Equal weighting across the five categories; the values sum to 1.0 so the
# weighted overall score stays on the same 0-100 scale as each category score.
CATEGORY_WEIGHTS = {
    "website_speed": 0.20,
    "mobile_experience": 0.20,
    "online_presence": 0.20,
    "seo_basics": 0.20,
    "ai_readiness": 0.20,
}


# ---------------------------------------------------------------------------
# Individual Category Scorers
# ---------------------------------------------------------------------------

def _score_website_speed(data: dict) -> Tuple[int, dict]:
    """
    Score website speed based on PageSpeed performance metrics.
    Returns (score 0-100, breakdown dict).
    """
    ps = data.get("pagespeed", {})
    ws = data.get("website", {})

    if not ps and not ws.get("is_reachable", False):
        return 0, {"reason": "Website unreachable"}

    if not ps:
        # No PageSpeed data but site is reachable — give partial score
        return 30, {"reason": "PageSpeed data unavailable, site reachable"}

    mobile_perf = ps.get("mobile_performance_score", 0)
    desktop_perf = ps.get("desktop_performance_score", 0)
    mobile_fcp = ps.get("mobile_fcp_ms", 10000)
    mobile_lcp = ps.get("mobile_lcp_ms", 15000)

    # Mobile performance is weighted heavier (70/30) since most tradie
    # customers search on mobile
    perf_score = (mobile_perf * 0.7) + (desktop_perf * 0.3)

    # Penalize for slow load times
    if mobile_fcp > 5000:
        perf_score -= 15
    elif mobile_fcp > 3000:
        perf_score -= 8
    elif mobile_fcp > 1800:
        perf_score -= 3

    if mobile_lcp > 7000:
        perf_score -= 15
    elif mobile_lcp > 4000:
        perf_score -= 8
    elif mobile_lcp > 2500:
        perf_score -= 3

    score = max(0, min(100, round(perf_score)))

    breakdown = {
        "mobile_performance": mobile_perf,
        "desktop_performance": desktop_perf,
        "mobile_fcp_ms": mobile_fcp,
        "mobile_lcp_ms": mobile_lcp,
        "load_time_seconds": round(mobile_lcp / 1000, 1) if mobile_lcp else None,
    }

    return score, breakdown


def _score_mobile_experience(data: dict) -> Tuple[int, dict]:
    """
    Score mobile-friendliness from PageSpeed and direct scraping.
    """
    ps = data.get("pagespeed", {})
    ws = data.get("website", {})

    if not ws.get("is_reachable", False):
        return 0, {"reason": "Website unreachable"}

    score = 0
    breakdown = {}

    # Viewport meta tag (30 points)
    has_viewport = ps.get("has_mobile_viewport", ws.get("has_mobile_viewport", False))
    if has_viewport:
        score += 30
    breakdown["has_viewport"] = has_viewport

    # Mobile performance score (40 points, scaled)
    mobile_perf = ps.get("mobile_performance_score", 0)
    score += round(mobile_perf * 0.4)
    breakdown["mobile_performance"] = mobile_perf

    # Accessibility score suggests good mobile UX (15 points)
    a11y = ps.get("mobile_accessibility_score", 0)
    score += round(a11y * 0.15)
    breakdown["accessibility"] = a11y

    # Content layout shift — low CLS means stable mobile experience (15 points)
    cls_val = ps.get("mobile_cls", 1.0)
    if cls_val <= 0.1:
        score += 15
    elif cls_val <= 0.25:
        score += 10
    elif cls_val <= 0.5:
        score += 5
    breakdown["cls"] = cls_val

    return max(0, min(100, score)), breakdown


def _score_online_presence(data: dict) -> Tuple[int, dict]:
    """
    Score online visibility from Brave Search and website social links.
    """
    brave = data.get("brave", {})
    ws = data.get("website", {})

    score = 0
    breakdown = {}

    # Search results count (25 points)
    result_count = brave.get("search_result_count", 0)
    if result_count >= 1000:
        score += 25
    elif result_count >= 500:
        score += 20
    elif result_count >= 100:
        score += 15
    elif result_count >= 20:
        score += 10
    elif result_count > 0:
        score += 5
    breakdown["search_results"] = result_count

    # Social profiles found in search (25 points, 5 per platform)
    social_count = brave.get("social_profile_count", 0)
    page_social = ws.get("page_social_links", [])
    combined_social = social_count + len(page_social)
    social_score = min(25, combined_social * 5)
    score += social_score
    breakdown["social_profiles"] = combined_social

    # Google Business listing (20 points)
    has_gbp = brave.get("has_google_business", False)
    if has_gbp:
        score += 20
    breakdown["google_business"] = has_gbp

    # Directory listings (15 points)
    has_dirs = brave.get("has_directory_listings", False)
    if has_dirs:
        score += 15
    breakdown["directory_listings"] = has_dirs

    # Review mentions (15 points)
    review_mentions = brave.get("review_mentions", 0)
    if review_mentions >= 3:
        score += 15
    elif review_mentions >= 1:
        score += 10
    breakdown["review_mentions"] = review_mentions

    return max(0, min(100, score)), breakdown


def _score_seo_basics(data: dict) -> Tuple[int, dict]:
    """
    Score basic SEO setup from website scraping and PageSpeed.
    """
    ws = data.get("website", {})
    ps = data.get("pagespeed", {})

    if not ws.get("is_reachable", False):
        return 0, {"reason": "Website unreachable"}

    score = 0
    breakdown = {}

    # SSL / HTTPS (15 points)
    has_ssl = ws.get("has_ssl", False)
    if has_ssl:
        score += 15
    breakdown["has_ssl"] = has_ssl

    # Title tag present and reasonable length (15 points)
    has_title = ws.get("has_title", False)
    title = ws.get("title_tag", "")
    if has_title:
        score += 10
        if 30 <= len(title) <= 60:
            score += 5  # Optimal length
    breakdown["has_title"] = has_title
    breakdown["title_length"] = len(title)

    # Meta description (15 points)
    has_meta = ws.get("has_meta_description", False)
    meta = ws.get("meta_description", "")
    if has_meta:
        score += 10
        if 120 <= len(meta) <= 160:
            score += 5  # Optimal length
    breakdown["has_meta_description"] = has_meta

    # H1 tag (10 points)
    h1_count = ws.get("h1_count", 0)
    if h1_count == 1:
        score += 10  # Perfect — exactly one H1
    elif h1_count > 1:
        score += 5   # Multiple H1s — not ideal but present
    breakdown["h1_count"] = h1_count

    # Schema markup (10 points)
    if ws.get("has_schema_markup", False):
        score += 10
    breakdown["has_schema"] = ws.get("has_schema_markup", False)

    # Open Graph tags (5 points)
    if ws.get("has_og_tags", False):
        score += 5
    breakdown["has_og_tags"] = ws.get("has_og_tags", False)

    # Image alt tags (10 points)
    total_images = ws.get("total_images", 0)
    missing_alt = ws.get("images_without_alt", 0)
    if total_images > 0:
        alt_ratio = 1.0 - (missing_alt / total_images)
        score += round(alt_ratio * 10)
    else:
        score += 5  # No images is not a penalty
    breakdown["images_without_alt"] = missing_alt
    breakdown["total_images"] = total_images

    # PageSpeed SEO score (15 points, scaled)
    ps_seo = ps.get("mobile_seo_score", 0)
    score += round(ps_seo * 0.15)
    breakdown["pagespeed_seo"] = ps_seo

    # Content depth — word count (5 points)
    word_count = ws.get("word_count", 0)
    if word_count >= 500:
        score += 5
    elif word_count >= 200:
        score += 3
    elif word_count >= 50:
        score += 1
    breakdown["word_count"] = word_count

    return max(0, min(100, score)), breakdown


def _score_ai_readiness(data: dict, other_scores: dict) -> Tuple[int, dict]:
    """
    Score AI readiness — how much the business would benefit from AI.
    INVERSE logic: lower scores in other areas = HIGHER AI opportunity.
    A perfect website scores LOW on AI readiness (less need for AI).
    A terrible website scores HIGH (massive AI opportunity).
    """
    ws = data.get("website", {})

    breakdown = {}

    # Base: invert average of other scores
    other_avg = sum(other_scores.values()) / max(len(other_scores), 1)
    # Map: 0 other_avg -> 80 AI readiness, 100 other_avg -> 20 AI readiness
    base_score = max(20, min(80, round(80 - (other_avg * 0.6))))
    breakdown["base_opportunity"] = base_score

    score = base_score

    # Bonus for missing automation features
    if not ws.get("has_chat_widget", False):
        score += 8
        breakdown["no_chat_widget"] = True

    if not ws.get("has_booking_system", False):
        score += 7
        breakdown["no_booking_system"] = True

    if not ws.get("has_phone_visible", False):
        score += 5
        breakdown["phone_not_visible"] = True

    # If site is unreachable — maximum AI opportunity
    if not ws.get("is_reachable", False):
        score = 95
        breakdown["site_unreachable"] = True

    return max(0, min(100, score)), breakdown


# ---------------------------------------------------------------------------
# Main Analysis Functions
# ---------------------------------------------------------------------------

def calculate_scores(scraped_data: dict) -> dict:
    """
    Run every category scorer against the scraped data.

    AI readiness is computed last because it inverts the other four
    category scores. Returns {category: {"score": int, "breakdown": dict}}.
    """
    results = {
        "website_speed": _score_website_speed(scraped_data),
        "mobile_experience": _score_mobile_experience(scraped_data),
        "online_presence": _score_online_presence(scraped_data),
        "seo_basics": _score_seo_basics(scraped_data),
    }

    # AI readiness derives from the preliminary scores of the other four.
    preliminary = {name: score for name, (score, _bd) in results.items()}
    results["ai_readiness"] = _score_ai_readiness(scraped_data, preliminary)

    return {
        name: {"score": score, "breakdown": bd}
        for name, (score, bd) in results.items()
    }


def calculate_overall_score(scores: dict) -> int:
    """
    Combine the category scores into a single weighted 0-100 overall score.

    Accepts either {"score": n, ...} dicts or bare numbers per category;
    missing categories contribute zero.
    """
    def _value(entry):
        # Tolerate both the dict shape from calculate_scores and raw numbers.
        return entry.get("score", 0) if isinstance(entry, dict) else entry

    weighted = sum(
        _value(scores.get(category, {})) * weight
        for category, weight in CATEGORY_WEIGHTS.items()
    )
    return max(0, min(100, round(weighted)))


def _unpack_score(entry) -> Tuple[int, dict]:
    """
    Normalize one category entry to (score, breakdown).

    Accepts either the {"score": n, "breakdown": {...}} shape produced by
    calculate_scores or a bare numeric score (in which case the breakdown
    is empty).
    """
    if isinstance(entry, dict):
        return entry.get("score", 0), entry.get("breakdown", {})
    return entry, {}


def _speed_recommendation(score, breakdown):
    """Build the website-speed recommendation, or None when speed is acceptable."""
    if score >= 50:
        return None
    load_time = breakdown.get("load_time_seconds")
    if load_time and load_time != "unknown":
        detail = (
            f"Your site takes {load_time} seconds to load on mobile. "
            "53% of mobile visitors leave after just 3 seconds. "
            "An AI-powered website loads instantly and never loses a customer to slow speeds."
        )
    else:
        detail = (
            "Your website is underperforming on speed. "
            "53% of mobile visitors leave after just 3 seconds of loading. "
            "An AI-powered website loads instantly and never loses a customer to slow speeds."
        )
    return {
        "category": "Website Speed",
        "title": "Your website is too slow",
        "detail": detail,
        "impact": "high",
        "priority": 100 - score,
    }


def _mobile_recommendation(score, breakdown):
    """Build the mobile-experience recommendation, or None when mobile UX is fine."""
    if score >= 60:
        return None
    if not breakdown.get("has_viewport", False):
        detail = (
            "Your website isn't mobile-optimised. "
            "68% of Australian tradies' customers search on their phone. "
            "Without a mobile-friendly site, you're invisible to most potential customers."
        )
    else:
        detail = (
            "Your mobile experience needs improvement. "
            "Customers on phones see a slow, unstable layout. "
            "An AI receptionist captures leads 24/7 even when your website can't."
        )
    return {
        "category": "Mobile Experience",
        "title": "Losing mobile customers",
        "detail": detail,
        "impact": "high",
        "priority": 100 - score,
    }


def _presence_recommendation(score, breakdown):
    """Build the online-presence recommendation, or None when visibility is fine."""
    if score >= 60:
        return None
    social_count = breakdown.get("social_profiles", 0)
    has_gbp = breakdown.get("google_business", False)
    # Most specific gap first: no Google profile, then thin social presence.
    if not has_gbp:
        detail = (
            "We couldn't find a Google Business Profile for your company. "
            "This is the #1 way local customers find tradies. "
            "Businesses with a Google profile get 7x more clicks than those without."
        )
    elif social_count < 2:
        detail = (
            "Your online presence is limited. Your competitors are showing up "
            "on social media, directories, and review sites — capturing leads "
            "that should be yours. AI can manage your online presence 24/7."
        )
    else:
        detail = (
            "We found limited online visibility for your business. "
            "Your competitors are getting the leads you're missing. "
            "An AI-powered presence manager keeps you visible everywhere, always."
        )
    return {
        "category": "Online Presence",
        "title": "Competitors are more visible online",
        "detail": detail,
        "impact": "high" if score < 30 else "medium",
        "priority": 100 - score,
    }


def _seo_recommendation(score, breakdown):
    """Build the SEO recommendation, or None when SEO basics are fine."""
    if score >= 60:
        return None
    # Collect concrete findings; defaults lean positive so an empty
    # breakdown produces the generic message rather than false issues.
    issues = []
    if not breakdown.get("has_ssl", True):
        issues.append("lacks HTTPS security")
    if not breakdown.get("has_meta_description", True):
        issues.append("is missing its meta description")
    if not breakdown.get("has_title", True):
        issues.append("has no proper title tag")
    if breakdown.get("h1_count", 1) == 0:
        issues.append("is missing its main heading")

    if issues:
        issue_text = ", ".join(issues[:2])
        detail = (
            f"Your website {issue_text}. "
            "This means Google can't properly show your business in search results. "
            "You're invisible to customers actively searching for your services."
        )
    else:
        detail = (
            "Your SEO setup needs work. Google can't properly understand "
            "and rank your website. Customers searching for your services "
            "are finding your competitors instead."
        )
    return {
        "category": "SEO Basics",
        "title": "Google can't find your business",
        "detail": detail,
        "impact": "medium",
        "priority": 100 - score,
    }


def _ai_recommendation(score, breakdown):
    """Build the AI-opportunity recommendation, or None when opportunity is low."""
    if score < 50:
        return None
    missing = []
    if breakdown.get("no_chat_widget"):
        missing.append("no live chat to capture after-hours leads")
    if breakdown.get("no_booking_system"):
        missing.append("no online booking system")
    if breakdown.get("phone_not_visible"):
        missing.append("phone number hard to find")

    if missing:
        missing_text = "; ".join(missing[:2])
        detail = (
            f"Your business has {missing_text}. "
            "An AI receptionist answers every call, books jobs, and follows up "
            "with customers — even at 2am. Your competitors who adopt AI first "
            "will capture the leads you're currently losing."
        )
    else:
        detail = (
            "Your business has significant opportunity to benefit from AI. "
            "An AI receptionist can answer every call, book jobs, and follow up "
            "automatically. Businesses using AI report 40% more booked jobs."
        )
    return {
        "category": "AI Readiness",
        "title": "High opportunity for AI automation",
        "detail": detail,
        "impact": "high",
        "priority": score,
    }


def generate_recommendations(scores: dict, scraped_data: dict) -> list:
    """
    Generate top 3-5 actionable recommendations based on weakest areas.

    Each recommendation is a dict with: category, title, detail, impact,
    priority. Recommendations are sorted by priority (highest first) and
    capped at five. ``scraped_data`` is accepted for interface stability
    but all inputs come from the score breakdowns.
    """
    # Builders run in a fixed order so the stable sort keeps the original
    # category ordering among equal priorities.
    builders = (
        ("website_speed", _speed_recommendation),
        ("mobile_experience", _mobile_recommendation),
        ("online_presence", _presence_recommendation),
        ("seo_basics", _seo_recommendation),
        ("ai_readiness", _ai_recommendation),
    )

    recommendations = []
    for category, builder in builders:
        score, breakdown = _unpack_score(scores.get(category, {}))
        rec = builder(score, breakdown)
        if rec is not None:
            recommendations.append(rec)

    recommendations.sort(key=lambda r: r["priority"], reverse=True)
    return recommendations[:5]


def analyze(scraped_data: dict) -> Tuple[dict, list]:
    """
    Main analysis entry point.

    Runs all category scorers, builds recommendations, then folds the
    weighted overall score and provider intelligence into the scores dict.
    Returns (scores_dict, recommendations_list).
    """
    scores = calculate_scores(scraped_data)
    recommendations = generate_recommendations(scores, scraped_data)

    # "overall" is attached only after the recommendations are built, so
    # generate_recommendations sees category entries exclusively.
    scores["overall"] = {"score": calculate_overall_score(scores)}

    # Pass through competitor provider intelligence for downstream use.
    website = scraped_data.get("website") or {}
    scores["_provider_intel"] = {
        "detected_provider": website.get("detected_provider"),
        "est_monthly_cost": website.get("est_monthly_cost"),
    }

    return scores, recommendations
