#!/usr/bin/env python3
"""
Supadata YouTube Transcript Fetcher

Fetches YouTube video transcripts via Supadata.ai API for Genesis knowledge ingestion.

Usage:
    python fetch_transcript.py --url "https://youtube.com/watch?v=VIDEO_ID"
    python fetch_transcript.py --batch videos.txt --output-dir ./transcripts/

Author: Genesis System
Version: 1.0.0
"""

import argparse
import json
import os
import re
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional, Dict, Any, List

try:
    import requests
except ImportError:
    print("Error: requests library required. Install with: pip install requests")
    sys.exit(1)


class SupadataClient:
    """Thin client for the Supadata.ai YouTube Transcript API.

    Handles video-ID extraction from the common YouTube URL forms,
    synchronous transcript fetches, and polling of asynchronous
    transcript-generation jobs (HTTP 202 responses).
    """

    BASE_URL = "https://api.supadata.ai/v1"
    # Seconds before an HTTP call is abandoned; without a timeout a dead
    # connection would hang the program indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, api_key: str):
        """Create a session pre-configured with the API-key header."""
        self.api_key = api_key
        self.session = requests.Session()
        self.session.headers.update({
            "x-api-key": api_key,
            "Content-Type": "application/json"
        })

    def extract_video_id(self, url: str) -> str:
        """Extract video ID from YouTube URL or return as-is if already an ID.

        Supports watch, youtu.be, embed, /v/, and shorts URL forms.

        Raises:
            ValueError: if no 11-character video ID can be found.
        """
        # Handle direct video IDs (always exactly 11 URL-safe characters)
        if re.match(r'^[a-zA-Z0-9_-]{11}$', url):
            return url

        # Handle various YouTube URL formats
        patterns = [
            r'(?:youtube\.com/watch\?v=|youtu\.be/|youtube\.com/embed/)([a-zA-Z0-9_-]{11})',
            r'youtube\.com/v/([a-zA-Z0-9_-]{11})',
            r'youtube\.com/shorts/([a-zA-Z0-9_-]{11})',
        ]

        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)

        raise ValueError(f"Could not extract video ID from: {url}")

    @staticmethod
    def _stamp(data: Dict[str, Any], video_id: str) -> Dict[str, Any]:
        """Attach video_id and a UTC fetch timestamp (ISO-8601, 'Z' suffix)."""
        data["video_id"] = video_id
        # Timezone-aware replacement for the deprecated datetime.utcnow();
        # the .replace() keeps the original trailing-"Z" timestamp format.
        data["fetched_at"] = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
        return data

    def get_transcript(
        self,
        url: str,
        lang: Optional[str] = None,
        mode: str = "auto",
        text: bool = False,
        chunk_size: Optional[int] = None
    ) -> Dict[str, Any]:
        """
        Fetch transcript for a YouTube video.

        Args:
            url: YouTube video URL or ID
            lang: Preferred language (ISO 639-1 code)
            mode: 'native' (existing only), 'generate' (AI), 'auto' (try native, fallback to generate)
            text: Return plain text instead of timestamped chunks
            chunk_size: Max characters per chunk (only when text=False)

        Returns:
            Transcript data with content, language info, and metadata

        Raises:
            ValueError: video not found/private, transcript unavailable,
                or the URL/ID could not be parsed.
            PermissionError: access denied (HTTP 403).
            RuntimeError: rate limiting, job failure, or unexpected API error.
            TimeoutError: async generation job did not complete in time.
        """
        video_id = self.extract_video_id(url)

        # Build request parameters
        params: Dict[str, Any] = {
            "url": f"https://www.youtube.com/watch?v={video_id}",
            "mode": mode
        }

        if lang:
            params["lang"] = lang
        if text:
            params["text"] = "true"
        # Explicit None check so a chunk_size of 0 is still forwarded rather
        # than being silently dropped by truthiness.
        if chunk_size is not None:
            params["chunkSize"] = chunk_size

        # Make request (bounded so a dead connection cannot hang forever)
        response = self.session.get(
            f"{self.BASE_URL}/transcript",
            params=params,
            timeout=self.REQUEST_TIMEOUT,
        )

        # Handle async processing (HTTP 202): the API hands back a job to poll.
        # A 202 without a jobId falls through to the generic error below.
        if response.status_code == 202:
            data = response.json()
            job_id = data.get("jobId")
            if job_id:
                return self._poll_job(job_id, video_id)

        # Handle errors
        if response.status_code == 404:
            raise ValueError(f"Video not found or private: {video_id}")
        elif response.status_code == 403:
            raise PermissionError(f"Access denied for video: {video_id}")
        elif response.status_code == 206:
            raise ValueError(f"Transcript unavailable for video: {video_id}")
        elif response.status_code == 429:
            raise RuntimeError("Rate limited - wait before retrying")
        elif response.status_code != 200:
            raise RuntimeError(f"API error {response.status_code}: {response.text}")

        # Success
        return self._stamp(response.json(), video_id)

    def _poll_job(self, job_id: str, video_id: str, max_attempts: int = 60) -> Dict[str, Any]:
        """Poll an async transcript job until it completes, fails, or times out.

        Args:
            job_id: Supadata job identifier returned by a 202 response.
            video_id: used for error messages and result stamping.
            max_attempts: upper bound on 1-second polling iterations.

        Raises:
            RuntimeError: the job failed or polling returned an error status.
            TimeoutError: the job did not finish within max_attempts seconds.
        """
        for _ in range(max_attempts):
            time.sleep(1)  # Poll every 1 second as recommended

            response = self.session.get(
                f"{self.BASE_URL}/transcript/{job_id}",
                timeout=self.REQUEST_TIMEOUT,
            )

            if response.status_code == 202:
                # Still processing
                continue
            if response.status_code != 200:
                raise RuntimeError(f"Job poll error: {response.status_code}")

            data = response.json()

            # Check if still processing
            status = data.get("status")
            if status in ("queued", "active"):
                continue
            if status == "failed":
                raise RuntimeError(f"Job failed for video {video_id}")

            # Completed
            return self._stamp(data, video_id)

        raise TimeoutError(f"Job timed out for video {video_id}")


def load_api_key() -> str:
    """Resolve the Supadata API key.

    Order of precedence: the SUPADATA_API_KEY environment variable first,
    then the first matching line in a known secrets.env location.

    Raises:
        ValueError: if no key can be found anywhere.
    """
    env_key = os.environ.get("SUPADATA_API_KEY")
    if env_key:
        return env_key

    # Fallback: scan the known secrets.env locations in order.
    candidates = (
        Path("/mnt/e/genesis-system/config/secrets.env"),
        Path.home() / ".genesis" / "secrets.env",
    )

    prefix = "SUPADATA_API_KEY="
    for candidate in candidates:
        if not candidate.exists():
            continue
        with open(candidate) as handle:
            for raw_line in handle:
                if raw_line.startswith(prefix):
                    # Split only on the first '=' so keys containing '=' survive.
                    return raw_line.strip().split("=", 1)[1]

    raise ValueError("SUPADATA_API_KEY not found in environment or config/secrets.env")


def fetch_single(
    client: SupadataClient,
    url: str,
    output: Optional[str],
    lang: Optional[str],
    mode: str,
    text: bool
) -> Dict[str, Any]:
    """Fetch one transcript, optionally saving it to *output* as JSON.

    Returns the transcript payload on success, or an ``{"error", "url"}``
    dict on any failure — errors are reported, never raised, so batch
    callers and the CLI keep running.
    """
    print(f"Fetching transcript for: {url}")

    try:
        transcript = client.get_transcript(url, lang=lang, mode=mode, text=text)

        if output:
            destination = Path(output)
            destination.parent.mkdir(parents=True, exist_ok=True)
            with open(destination, "w") as handle:
                json.dump(transcript, handle, indent=2)
            print(f"Saved to: {output}")

        return transcript

    except Exception as exc:
        print(f"Error: {exc}")
        return {"error": str(exc), "url": url}


def fetch_batch(
    client: SupadataClient,
    batch_file: str,
    output_dir: str,
    lang: Optional[str],
    mode: str,
    text: bool
) -> List[Dict[str, Any]]:
    """Fetch transcripts for every URL listed in *batch_file*.

    Blank lines and '#'-comment lines in the file are skipped.  Each
    successful transcript is written to ``<output_dir>/<video_id>.json``;
    failures are recorded as ``{"error", "url"}`` entries.  Returns all
    results in input order.
    """
    with open(batch_file) as handle:
        targets = [
            stripped
            for stripped in (raw.strip() for raw in handle)
            if stripped and not stripped.startswith("#")
        ]

    total = len(targets)
    print(f"Processing {total} videos...")

    dest_dir = Path(output_dir)
    dest_dir.mkdir(parents=True, exist_ok=True)

    results: List[Dict[str, Any]] = []
    for idx, target in enumerate(targets, 1):
        print(f"[{idx}/{total}] ", end="")

        try:
            payload = client.get_transcript(target, lang=lang, mode=mode, text=text)
            video_id = payload.get("video_id", f"unknown_{idx}")

            with open(dest_dir / f"{video_id}.json", "w") as out:
                json.dump(payload, out, indent=2)

            results.append(payload)
            print(f"OK: {video_id}")

        except Exception as exc:
            print(f"ERROR: {exc}")
            results.append({"error": str(exc), "url": target})

        # Rate limiting: brief pause between consecutive requests.
        if idx < total:
            time.sleep(0.5)

    # Summary
    succeeded = sum(1 for entry in results if "error" not in entry)
    print(f"\nCompleted: {succeeded}/{total} successful")

    return results


def main():
    """CLI entry point: parse arguments, resolve the API key, and dispatch
    to single-video or batch fetching."""
    parser = argparse.ArgumentParser(
        description="Fetch YouTube transcripts via Supadata.ai API"
    )

    parser.add_argument("--url", help="YouTube video URL or ID")
    parser.add_argument("--batch", help="File with list of YouTube URLs (one per line)")
    parser.add_argument("--output", "-o", help="Output file path for single video")
    parser.add_argument("--output-dir", help="Output directory for batch processing")
    parser.add_argument("--lang", help="Preferred transcript language (ISO 639-1 code)")
    parser.add_argument(
        "--mode",
        choices=["native", "generate", "auto"],
        default="auto",
        help="Transcript mode: native (existing only), generate (AI), auto (default)",
    )
    parser.add_argument(
        "--text",
        action="store_true",
        help="Return plain text instead of timestamped chunks",
    )
    parser.add_argument(
        "--api-key",
        help="Supadata API key (defaults to SUPADATA_API_KEY env var)",
    )

    opts = parser.parse_args()

    # Argument sanity checks (parser.error exits with usage message).
    if not (opts.url or opts.batch):
        parser.error("Either --url or --batch is required")
    if opts.batch and not opts.output_dir:
        parser.error("--output-dir is required with --batch")

    # CLI flag takes precedence over environment/secrets file.
    client = SupadataClient(opts.api_key or load_api_key())

    if opts.url:
        # Single-video mode; echo to stdout when no output file was given.
        result = fetch_single(
            client, opts.url, opts.output, opts.lang, opts.mode, opts.text
        )
        if not opts.output:
            print(json.dumps(result, indent=2))
        return

    fetch_batch(
        client, opts.batch, opts.output_dir, opts.lang, opts.mode, opts.text
    )


if __name__ == "__main__":
    main()
