#!/usr/bin/env python3
"""
MEGA TRADIE SCRAPER — Genesis ReceptionistAI Lead Pipeline
Targets: LocalSearch.com.au (primary), with extensible architecture for TrueLocal, HiPages
Trades: 11 trade categories
Cities: 19 Australian cities across every state and territory
Output: PostgreSQL tradie_leads table + CSV backup
Rate limiting: polite random delays between requests; dedup by phone across DB, CSV, and the current run

Usage:
    python mega_tradie_scraper.py [--trades plumbers electricians ...] [--cities brisbane-qld ...]
                                  [--max-pages N] [--limit N]
    (omit --trades / --cities to scrape every configured trade and city)
"""

import asyncio
import csv
import re
import random
import sys
import json
import argparse
import logging
from pathlib import Path
from datetime import date, datetime

sys.path.append('E:/genesis-system/data/genesis-memory')
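# The genesis-memory path above is assumed to hold elestio_config (shared Postgres settings).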

# ─── CONFIG ────────────────────────────────────────────────────────────────────

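# LocalSearch URL category slug -> label stored in the trade_category column.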
TRADES = {
    "plumbers":              "plumber",
    "electricians":          "electrician",
    "builders":              "builder",
    "concreters":            "concreter",
    "roofing-contractors":   "roofing-contractor",
    "painters":              "painter",
    "air-conditioning":      "air-conditioning",
    "landscapers":           "landscaper",
    "tilers":                "tiler",
    "fencing-contractors":   "fencing-contractor",
    "carpenters":            "carpenter",
}

CITIES = [
    # QLD
    "brisbane-qld", "gold-coast-qld", "sunshine-coast-qld",
    "cairns-qld", "townsville-qld", "toowoomba-qld",
    # NSW
    "sydney-nsw", "newcastle-nsw", "wollongong-nsw", "central-coast-nsw",
    # VIC
    "melbourne-vic", "geelong-vic", "ballarat-vic",
    # WA
    "perth-wa", "fremantle-wa",
    # SA
    "adelaide-sa",
    # ACT
    "canberra-act",
    # TAS
    "hobart-tas",
    # NT
    "darwin-nt",
]

OUTPUT_DIR = Path("E:/genesis-system/data/LEADS")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
CSV_FILE = OUTPUT_DIR / "mega_tradies_localsearch.csv"
LOG_FILE = OUTPUT_DIR / "mega_scraper.log"
TODAY = date.today().isoformat()

FIELDNAMES = [
    "business_name", "trade_category", "phone", "website",
    "street_address", "suburb", "state", "postcode", "email",
    "rating", "review_count", "source", "source_url", "scraped_date"
]

logging.basicConfig(
    filename=str(LOG_FILE),
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    filemode='a'
)

# ─── DB SETUP ──────────────────────────────────────────────────────────────────

def get_db_conn():
    try:
        from elestio_config import PostgresConfig
        import psycopg2
        return psycopg2.connect(**PostgresConfig.get_connection_params())
    except Exception as e:
        logging.warning(f"DB connection failed: {e}")
        return None

def ensure_tradie_leads_table(conn):
    """Create tradie_leads table if not exists with full schema."""
    if not conn:
        return
    try:
        cur = conn.cursor()
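        # has_website, website_score, priority_score and outreach_status are
        # reserved for downstream enrichment/outreach stages; this script only
        # populates the scraped fields.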
        cur.execute("""
            CREATE TABLE IF NOT EXISTS tradie_leads (
                id              SERIAL PRIMARY KEY,
                business_name   TEXT NOT NULL,
                trade_category  TEXT,
                phone           TEXT,
                website         TEXT,
                street_address  TEXT,
                suburb          TEXT,
                state           TEXT,
                postcode        TEXT,
                email           TEXT,
                rating          TEXT,
                review_count    TEXT,
                source          TEXT DEFAULT 'localsearch',
                source_url      TEXT,
                scraped_date    TEXT,
                has_website     BOOLEAN,
                website_score   INTEGER,
                priority_score  FLOAT,
                outreach_status TEXT DEFAULT 'not_contacted',
                created_at      TIMESTAMP DEFAULT NOW(),
                UNIQUE(phone)
            )
        """)
        conn.commit()
        logging.info("tradie_leads table ready")
    except Exception as e:
        conn.rollback()
        logging.error(f"Table creation error: {e}")

def load_existing_phones_from_db(conn):
    """Load existing phones to avoid duplicates."""
    if not conn:
        return set()
    try:
        cur = conn.cursor()
        cur.execute("SELECT phone FROM tradie_leads WHERE phone IS NOT NULL AND phone != ''")
        return {r[0] for r in cur.fetchall()}
    except Exception as e:
        logging.error(f"Error loading phones: {e}")
        return set()

def insert_lead(conn, lead: dict):
    """Insert a single lead into PostgreSQL, skip on phone conflict."""
    if not conn:
        return False
    try:
        cur = conn.cursor()
        # Store a missing phone as NULL rather than '' so UNIQUE(phone) doesn't
        # silently drop every phoneless lead after the first (NULLs never
        # conflict in PostgreSQL unique constraints).
        if not lead.get("phone"):
            lead = {**lead, "phone": None}
        cur.execute("""
            INSERT INTO tradie_leads
                (business_name, trade_category, phone, website, street_address,
                 suburb, state, postcode, email, rating, review_count,
                 source, source_url, scraped_date)
            VALUES
                (%(business_name)s, %(trade_category)s, %(phone)s, %(website)s,
                 %(street_address)s, %(suburb)s, %(state)s, %(postcode)s,
                 %(email)s, %(rating)s, %(review_count)s,
                 %(source)s, %(source_url)s, %(scraped_date)s)
            ON CONFLICT (phone) DO NOTHING
        """, lead)
        conn.commit()
        return cur.rowcount > 0
    except Exception as e:
        conn.rollback()
        logging.error(f"Insert error: {e}")
        return False

# ─── SCRAPING CORE ─────────────────────────────────────────────────────────────

async def extract_listing(listing, page_url: str, trade_label: str) -> dict | None:
    """Extract all fields from a single listing card element."""
    try:
        # Business name — try multiple selectors
        name = None
        for sel in [
            '[data-testid="listing-name"]',
            'h2', 'h3',
            '.business-name', '.listing-name',
        ]:
            el = await listing.query_selector(sel)
            if el:
                name = (await el.text_content() or "").strip()
                if name:
                    break
        if not name:
            return None

        # Phone
        phone = ""
        phone_el = await listing.query_selector('a[href^="tel:"]')
        if phone_el:
            href = await phone_el.get_attribute('href') or ''
            phone = href.replace('tel:', '').strip()
            phone = re.sub(r'[^\d+\-\s()]', '', phone).strip()

        # Website
        website = ""
        for sel in ['a[data-ga-action="Website"]', 'a[data-testid="website-link"]']:
            web_el = await listing.query_selector(sel)
            if web_el:
                website = (await web_el.get_attribute('href') or '').strip()
                if website:
                    break

        # Address
        street_address, suburb, state, postcode = '', '', '', ''
        addr_el = await listing.query_selector('[data-testid="listing-address"], address')
        if addr_el:
            addr_text = (await addr_el.text_content() or '').strip()
            parts = [p.strip() for p in addr_text.split(',')]
            if len(parts) >= 2:
                street_address = parts[0]
                last_part = parts[-1]
                m = re.search(r'^(.*?)\s*([A-Z]{2,3})\s*(\d{4})$', last_part.strip())
                if m:
                    suburb = m.group(1).strip() or (parts[-2] if len(parts) > 2 else '')
                    state = m.group(2)
                    postcode = m.group(3)

        # Rating
        rating = ''
        rating_el = await listing.query_selector('span[aria-label$="average rating"], [data-testid="rating"]')
        if rating_el:
            aria = await rating_el.get_attribute('aria-label') or ''
            m = re.search(r'([\d.]+)', aria or await rating_el.text_content() or '')
            if m:
                rating = m.group(1)

        # Review count
        review_count = ''
        for sel in ['p.sc-kpDqfm', '.review-count', '[data-testid="review-count"]']:
            rev_el = await listing.query_selector(sel)
            if rev_el:
                rev_text = await rev_el.text_content() or ''
                m = re.search(r'(\d+)', rev_text)
                if m:
                    review_count = m.group(1)
                    break

        # Source URL (profile link)
        src_url = page_url
        profile_el = await listing.query_selector('a[href*="/profile/"]')
        if profile_el:
            href = await profile_el.get_attribute('href') or ''
            if href.startswith('/'):
                src_url = f'https://www.localsearch.com.au{href}'
            elif href.startswith('http'):
                src_url = href

        return {
            "business_name":  name,
            "trade_category": trade_label,
            "phone":          phone,
            "website":        website,
            "street_address": street_address,
            "suburb":         suburb,
            "state":          state,
            "postcode":       postcode,
            "email":          "",
            "rating":         rating,
            "review_count":   review_count,
            "source":         "localsearch",
            "source_url":     src_url,
            "scraped_date":   TODAY,
        }
    except Exception as e:
        logging.debug(f"Listing extract error: {e}")
        return None


async def scrape_trade_city(page, category: str, city: str, trade_label: str,
                            existing_phones: set, csv_writer, conn,
                            max_pages: int = 20) -> int:
    """Scrape all pages for one trade+city combo. Returns new record count."""
    base_url = f"https://www.localsearch.com.au/find/{category}/{city}"
    new_count = 0

    for page_num in range(1, max_pages + 1):
        url = base_url if page_num == 1 else f"{base_url}?page={page_num}"
        logging.info(f"Scraping: {url}")

        try:
            await page.goto(url, wait_until="domcontentloaded", timeout=60000)
            # Try to wait for listing cards
            try:
                await page.wait_for_selector('div[data-testid="listing-card"]', timeout=15000)
            except Exception:
                # Check if no results page
                content = await page.content()
                if 'no results' in content.lower() or 'sorry' in content.lower():
                    logging.info(f"No results at {url}")
                    break
                # Might still have results with different selector
                pass

            listings = await page.query_selector_all('div[data-testid="listing-card"]')
            if not listings:
                # Try alternate selectors
                listings = await page.query_selector_all('article.listing-card, .search-result-card')
            if not listings:
                logging.info(f"No listings found on page {page_num} for {category}/{city}")
                break

            page_new = 0
            for listing in listings:
                data = await extract_listing(listing, url, trade_label)
                if not data:
                    continue
                if not data['business_name']:
                    continue
                # Dedup by phone
                phone_key = data['phone']
                if phone_key and phone_key in existing_phones:
                    continue
                if phone_key:
                    existing_phones.add(phone_key)

                # Write to CSV
                csv_writer.writerow(data)
                # Write to DB
                insert_lead(conn, data)
                page_new += 1
                new_count += 1

            print(f"  {category}/{city} p{page_num}: +{page_new} | total new: {new_count}")

            if page_new == 0:
                break  # No new results on this page

            # Check for next page button
            next_btn = await page.query_selector('a[aria-label="Next page"], [data-testid="next-page"]')
            if not next_btn:
                break

            # Polite delay
            await asyncio.sleep(random.uniform(1.5, 3.0))

        except Exception as e:
            logging.error(f"Error scraping {url}: {e}")
            print(f"  ERROR on {url}: {e}")
            break

    return new_count


# ─── MAIN ──────────────────────────────────────────────────────────────────────

async def main(selected_trades=None, selected_cities=None, max_pages=20, limit=None):
    from playwright.async_api import async_playwright

    # Filter trades/cities
    trades_to_run = {k: v for k, v in TRADES.items()
                     if selected_trades is None or k in selected_trades}
    cities_to_run = [c for c in CITIES
                     if selected_cities is None or c in selected_cities]

    print(f"\n=== MEGA TRADIE SCRAPER ===")
    print(f"Trades: {list(trades_to_run.keys())}")
    print(f"Cities: {cities_to_run}")
    print(f"Output: {CSV_FILE}")
    print(f"Started: {datetime.now().isoformat()}")
    print("=" * 40)

    # DB setup
    conn = get_db_conn()
    ensure_tradie_leads_table(conn)
    existing_phones = load_existing_phones_from_db(conn)

    # Also load phones from existing CSV to avoid duplicates with prior runs
    if CSV_FILE.exists():
        try:
            with open(CSV_FILE, encoding='utf-8') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    p = row.get('phone', '').strip()
                    if p:
                        existing_phones.add(p)
        except Exception:
            pass

    print(f"Pre-loaded {len(existing_phones):,} existing phones for dedup")

    # CSV setup
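    # Open in append mode so repeated runs accumulate into one file; the header
    # is only written when the CSV is new or empty.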
    write_header = not CSV_FILE.exists() or CSV_FILE.stat().st_size == 0
    csv_file_handle = open(CSV_FILE, 'a', newline='', encoding='utf-8')
    csv_writer = csv.DictWriter(csv_file_handle, fieldnames=FIELDNAMES)
    if write_header:
        csv_writer.writeheader()

    total_new = 0
    jobs_run = 0

    try:
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
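            # Use a desktop Chrome user agent so requests look like an ordinary browser session.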
            context = await browser.new_context(
                user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                           '(KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36'
            )
            page = await context.new_page()

            for category, label in trades_to_run.items():
                for city in cities_to_run:
                    if limit and total_new >= limit:
                        print(f"\nLimit of {limit} reached. Stopping.")
                        break

                    print(f"\n--- Scraping: {category} / {city} ---")
                    count = await scrape_trade_city(
                        page, category, city, label,
                        existing_phones, csv_writer, conn,
                        max_pages=max_pages
                    )
                    total_new += count
                    jobs_run += 1

                    csv_file_handle.flush()

                    # Inter-city delay
                    await asyncio.sleep(random.uniform(2.0, 5.0))

                if limit and total_new >= limit:
                    break

            await browser.close()

    finally:
        csv_file_handle.close()
        if conn:
            conn.close()

    # Final report
    print(f"\n{'='*40}")
    print(f"MEGA SCRAPE COMPLETE")
    print(f"Jobs run:      {jobs_run}")
    print(f"New leads:     {total_new:,}")
    print(f"Output CSV:    {CSV_FILE}")
    print(f"Finished:      {datetime.now().isoformat()}")

    # Write summary to file for logging
    summary = {
        "run_date": TODAY,
        "jobs_run": jobs_run,
        "new_leads": total_new,
        "csv_output": str(CSV_FILE),
        "finished_at": datetime.now().isoformat(),
    }
    summary_path = OUTPUT_DIR / "mega_scraper_last_run.json"
    with open(summary_path, 'w') as f:
        json.dump(summary, f, indent=2)

    return total_new


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Mega Tradie Scraper')
    parser.add_argument('--trades', nargs='+', default=None,
                        help='Trade categories to scrape (default: all)')
    parser.add_argument('--cities', nargs='+', default=None,
                        help='Cities to scrape (default: all)')
    parser.add_argument('--max-pages', type=int, default=20,
                        help='Max pages per trade/city combo (default: 20)')
    parser.add_argument('--limit', type=int, default=None,
                        help='Stop after N new leads (for testing)')
    args = parser.parse_args()

    asyncio.run(main(
        selected_trades=args.trades,
        selected_cities=args.cities,
        max_pages=args.max_pages,
        limit=args.limit,
    ))
