#!/usr/bin/env python3
"""
YouTube Daily Harvest
======================
Unified automation script to harvest up to --max-videos (default 100) high-value
YouTube videos per day.

1. Discover: RSS feeds of the channels configured in youtube_scout_channels.json (15+ channels)
2. Score: keyword-based relevance matching on video titles
3. Extract: Turbo Transcript Engine (async, concurrent)
4. Analyze: keyword & topic detection in the transcript text
5. Ingest: save per-video JSON to the knowledge base (usage example below)
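
Usage (the filename shown is an assumption; invoke whatever name this script
is saved under):

    python youtube_daily_harvest.py --hours 24 --max-videos 50 --dry-run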

Author: Genesis System
"""

import asyncio
import json
import logging
import re
import time
import urllib.request
import xml.etree.ElementTree as ET
from collections import Counter
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict

# Windows native paths
GENESIS_ROOT = Path("e:/genesis-system")
CHANNELS_FILE = GENESIS_ROOT / "data" / "youtube_scout_channels.json"
OUTPUT_DIR = GENESIS_ROOT / "data" / "youtube_knowledge_base" / "transcripts"
SCOUT_DIR = GENESIS_ROOT / "data" / "youtube_scout"

# Make sure imports from core work
import sys
sys.path.insert(0, str(GENESIS_ROOT))

from core.youtube.transcript_engine import TranscriptEngine
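# The harvest below relies on TranscriptEngine providing:
#   * a max_concurrent constructor argument
#   * an async extract_batch(video_ids) returning {video_id: result},
#     where each result exposes .success, .full_text and .method
# This mirrors exactly how the engine is called in run_harvest().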

logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
logger = logging.getLogger("yt_harvest")

def fetch_rss_feed(channel_id: str, hours_lookback: int) -> List[Dict]:
    """Fetch video metadata from channel RSS feed."""
    videos = []
    cutoff = datetime.utcnow() - timedelta(hours=hours_lookback)
    url = f"https://www.youtube.com/feeds/videos.xml?channel_id={channel_id}"
    
    try:
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        with urllib.request.urlopen(req, timeout=10) as response:
            xml_data = response.read()
            
        root = ET.fromstring(xml_data)
        ns = {"atom": "http://www.w3.org/2005/Atom", "yt": "http://www.youtube.com/xml/schemas/2015"}
        
        for entry in root.findall("atom:entry", ns):
            video_id = entry.find("yt:videoId", ns)
            title = entry.find("atom:title", ns)
            published = entry.find("atom:published", ns)
            
            if video_id is not None and title is not None and published is not None:
                pub_date = datetime.fromisoformat(published.text.replace("Z", "+00:00")).replace(tzinfo=None)
                if pub_date > cutoff:
                    videos.append({
                        "video_id": video_id.text,
                        "title": title.text,
                        "published_at": published.text,
                        "channel_id": channel_id
                    })
    except Exception as e:
        logger.warning(f"Error fetching RSS for {channel_id}: {e}")
        
    return videos

def score_video(title: str, config: Dict) -> float:
    """Score video relevance based on keywords."""
    score = 0.0
    title_lower = title.lower()
    for kw in config.get("relevance_keywords", []):
        if kw.lower() in title_lower:
            score += 0.2
    # High value patterns
    high_value_patterns = [r"tutorial", r"how to", r"guide", r"breakthrough", r"game.?changer", r"secret", r"new feature", r"update", r"release", r"review"]
    for pattern in high_value_patterns:
        if re.search(pattern, title_lower):
            score += 0.15
    return min(score, 1.0)

async def run_harvest(hours_lookback: int = 48, max_videos: int = 100, dry_run: bool = False):
    logger.info(f"Starting YouTube Daily Harvest (lookback: {hours_lookback}h, max: {max_videos})")
    
    # 1. Load config
    if not CHANNELS_FILE.exists():
        logger.error(f"Channels file not found: {CHANNELS_FILE}")
        return
        
    with open(CHANNELS_FILE, encoding="utf-8") as f:
        config = json.load(f)
        
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    SCOUT_DIR.mkdir(parents=True, exist_ok=True)
        
    # 2. Discover Videos
    logger.info("Discovering videos from RSS feeds...")
    candidates = []
    
    # Simple rate limiting for RSS fetches
    for channel in config.get("channels", []):
        cid = channel.get("id")
        name = channel.get("name")
        if not cid or cid == "PENDING_LOOKUP":
            continue
            
        logger.info(f"Checking channel: {name} ({cid})")
        vids = await asyncio.to_thread(fetch_rss_feed, cid, hours_lookback)  # run the blocking RSS fetch off the event loop
        for v in vids:
            v["channel_name"] = name
            v["score"] = score_video(v["title"], config)
        candidates.extend(vids)
        await asyncio.sleep(0.5)
        
    # Sort by score and take top
    candidates.sort(key=lambda x: x["score"], reverse=True)
    to_process = candidates[:max_videos]
    logger.info(f"Found {len(candidates)} videos, selected top {len(to_process)} for extraction")
    
    if not to_process:
        logger.info("No new videos to process.")
        return
        
    if dry_run:
        for v in to_process:
            logger.info(f"DRY RUN - [{v['score']:.2f}] {v['title']} ({v['video_id']})")
        return
        
    # 3. Extract Transcripts (10 concurrent)
    video_ids = [v["video_id"] for v in to_process]
    engine = TranscriptEngine(max_concurrent=10)
    logger.info("Extracting transcripts...")
    
    start_time = time.time()
    results = await engine.extract_batch(video_ids)
    
    success_count = sum(1 for r in results.values() if r.success)
    logger.info(f"Extracted {success_count}/{len(video_ids)} transcripts in {time.time()-start_time:.1f}s")
    
    # 4 & 5. Analyze & Ingest
    logger.info("Ingesting transcripts...")
    extracted_data = []
    
    for v in to_process:
        vid = v["video_id"]
        res = results.get(vid)
        if res and res.success:
            # Simple keyword extraction
            text_lower = res.full_text.lower()
            found_kws = [kw for kw in config.get("relevance_keywords", []) if kw.lower() in text_lower]
            
            item = {
                "video_id": vid,
                "title": v["title"],
                "channel": v["channel_name"],
                "published_at": v["published_at"],
                "score": v["score"],
                "words": len(res.full_text.split()),
                "text": res.full_text,
                "topics_detected": found_kws,
                "extraction_method": res.method,
                "processed_at": datetime.utcnow().isoformat() + "Z"
            }
            extracted_data.append(item)
            
            # Save individual JSON to KB
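            # (mode "w": a re-run overwrites the existing file for the same video_id)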
            out_path = OUTPUT_DIR / f"{vid}.json"
            with open(out_path, "w", encoding="utf-8") as f:
                json.dump(item, f, indent=2)
                
    # 6. Save Nightly Report
    report_path = SCOUT_DIR / f"harvest_report_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}.json"
    report = {
        "timestamp": datetime.utcnow().isoformat() + "Z",
        "videos_scanned": len(candidates),
        "transcripts_extracted": len(extracted_data),
        "total_words": sum(item["words"] for item in extracted_data),
        "top_keywords": {}, 
        "videos": [{"id": e["video_id"], "title": e["title"], "score": e["score"]} for e in extracted_data]
    }
    with open(report_path, "w", encoding="utf-8") as f:
        json.dump(report, f, indent=2)
        
    logger.info(f"Harvest complete. Report saved: {report_path.name}")

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="YouTube Daily Harvest")
    parser.add_argument("--hours", type=int, default=48, help="Lookback window in hours")
    parser.add_argument("--max-videos", type=int, default=100, help="Max videos to process")
    parser.add_argument("--dry-run", action="store_true", help="List selected videos without extracting transcripts")
    args = parser.parse_args()
    
    asyncio.run(run_harvest(args.hours, args.max_videos, args.dry_run))
