#!/usr/bin/env python3
"""
Genesis Deep Think Bridge
Connects Gemini Deep Think/Thinking Mode directly to Genesis.

THREE MODES:
1. API Mode (gemini-2.5-flash or gemini-2.5-pro with ThinkingConfig)
2. CLI Mode (gemini CLI with model selection)
3. Browser Mode (Playwright automation of gemini.google.com for Deep Think web UI)

Usage:
    # API mode (preferred for automation)
    python deep_think_bridge.py --mode api --prompt "Your query here"

    # CLI mode (uses Gemini CLI)
    python deep_think_bridge.py --mode cli --prompt "Your query here"

    # Browser mode (for Deep Think web UI only)
    python deep_think_bridge.py --mode browser --prompt "Your query here"

    # From file
    python deep_think_bridge.py --mode api --file /path/to/prompt.txt

    # Save output
    python deep_think_bridge.py --mode api --prompt "Query" --output result.txt

    # CTM (Commit To Memory after response)
    python deep_think_bridge.py --mode api --prompt "Query" --ctm
"""

import argparse
import json
import os
import sys
import time
from pathlib import Path
from typing import Optional

# Add parent directory to path for imports
sys.path.append(str(Path(__file__).parent.parent))


def get_api_key() -> Optional[str]:
    """Return the Gemini API key, or None if none can be located.

    Lookup order:
    1. The GEMINI_API_KEY environment variable.
    2. The Credentials/gemini_api_key.txt file in the project root.
    """
    env_key = os.environ.get("GEMINI_API_KEY")
    if env_key:
        return env_key

    key_file = Path(__file__).parent.parent / "Credentials" / "gemini_api_key.txt"
    return key_file.read_text().strip() if key_file.exists() else None


def api_mode(prompt: str, model: str = "gemini-2.5-flash", thinking_budget: int = 8192) -> dict:
    """
    Use Gemini API with thinking mode enabled.

    Models supporting thinking:
    - gemini-2.5-flash (uses ThinkingConfig with include_thoughts=True)
    - gemini-2.5-pro (uses ThinkingConfig with include_thoughts=True)

    Args:
        prompt: The query to send
        model: Model ID
        thinking_budget: Thinking token budget (default: 8192, up to 24576)

    Returns:
        dict with 'thinking' and 'response' keys

    Exits the process with status 1 when no SDK or no API key is available.
    """
    try:
        # Try new google.genai package first (recommended)
        try:
            import google.genai as genai
            using_new_api = True
        except ImportError:
            # Fall back to old google.generativeai (deprecated)
            import google.generativeai as genai
            using_new_api = False
    except ImportError:
        print("ERROR: Neither google-genai nor google-generativeai installed.")
        print("Install with: pip install google-genai")
        sys.exit(1)

    api_key = get_api_key()
    if not api_key:
        print("ERROR: No GEMINI_API_KEY found in environment or Credentials/gemini_api_key.txt")
        sys.exit(1)

    print(f"🧠 Sending to {model} with thinking enabled...")
    start_time = time.time()

    if using_new_api:
        # New API — use ThinkingConfig with include_thoughts=True to get thought blocks
        client = genai.Client(api_key=api_key)
        response = client.models.generate_content(
            model=model,
            contents=prompt,
            config=genai.types.GenerateContentConfig(
                thinking_config=genai.types.ThinkingConfig(
                    thinking_budget=thinking_budget,
                    include_thoughts=True
                )
            )
        )

        elapsed = time.time() - start_time
        print(f"✅ Response received in {elapsed:.2f}s")

        # Extract thoughts and response text from parts separately.
        # Guard against (a) an empty candidates list (e.g. safety-blocked
        # prompts), which would raise IndexError, and (b) parts whose
        # `text` is None (non-text parts such as thought signatures in the
        # new SDK), which would raise TypeError on `+=`.
        thought_text = ""
        response_text = ""
        candidates = response.candidates or []
        if candidates and candidates[0].content and candidates[0].content.parts:
            for part in candidates[0].content.parts:
                if not part.text:
                    continue  # skip parts with no text payload
                if getattr(part, 'thought', False):
                    thought_text += part.text
                else:
                    response_text += part.text
        else:
            print("⚠️  No content returned (prompt may have been blocked)")

        return {
            "model": model,
            "thinking_budget": thinking_budget,
            "elapsed_seconds": elapsed,
            "thinking": thought_text.strip(),
            "response": response_text.strip(),
            "raw": response_text.strip()
        }

    else:
        # Old API configuration (deprecated but still works)
        print(f"⚠️  Using deprecated google.generativeai API (consider upgrading to google-genai)")
        genai.configure(api_key=api_key)

        generation_config = {
            "temperature": 1.0,
            "max_output_tokens": 16384,
        }

        model_obj = genai.GenerativeModel(
            model_name=model,
            generation_config=generation_config
        )

        response = model_obj.generate_content(prompt)
        response_text = response.text

        elapsed = time.time() - start_time
        print(f"✅ Response received in {elapsed:.2f}s")

        # Old API has no separate thought stream, so 'thinking' is empty.
        return {
            "model": model,
            "thinking_budget": thinking_budget,
            "elapsed_seconds": elapsed,
            "thinking": "",
            "response": response_text,
            "raw": response_text
        }


def cli_mode(prompt: str, model: str = "gemini-2.5-pro") -> dict:
    """
    Use Gemini CLI with specified model.

    NOTE: Gemini CLI doesn't have a --thinking flag as of v0.28.2.
    The thinking display is controlled in .gemini/settings.json.

    Args:
        prompt: The query to send
        model: Model ID

    Returns:
        dict with 'response' key (thinking included if configured in settings)

    Exits the process with status 1 when the CLI is missing or fails.
    """
    import subprocess

    # Verify the gemini CLI is installed and on PATH before doing anything else.
    try:
        subprocess.run(["gemini", "--version"], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("ERROR: gemini CLI not found. Install from https://github.com/google-gemini/gemini-cli")
        sys.exit(1)

    print(f"🧠 Sending to Gemini CLI (model: {model})...")
    started = time.time()

    # Non-interactive invocation: prompt and model passed as flags, plain text out.
    completed = subprocess.run(
        ["gemini", "--prompt", prompt, "--model", model, "--output-format", "text"],
        capture_output=True,
        text=True,
    )

    duration = time.time() - started

    if completed.returncode != 0:
        print(f"ERROR: Gemini CLI failed with code {completed.returncode}")
        print(f"STDERR: {completed.stderr}")
        sys.exit(1)

    print(f"✅ Response received in {duration:.2f}s")

    return {
        "model": model,
        "elapsed_seconds": duration,
        "response": completed.stdout.strip(),
        "raw": completed.stdout,
    }


def browser_mode(prompt: str, account: str = "sunvision07@gmail.com") -> dict:
    """
    Use Playwright to automate gemini.google.com with Deep Think mode.

    NOTE: This requires:
    1. Playwright installed (pip install playwright)
    2. Browsers installed (playwright install)
    3. User must be logged in to Google account with AI Ultra plan

    Args:
        prompt: The query to send
        account: Google account email (must have AI Ultra for Deep Think)

    Returns:
        dict with 'response' key

    Currently a stub: the Deep Think UI selectors have not been mapped yet,
    so this always returns an error dict after loading the page.
    """
    try:
        from playwright.sync_api import sync_playwright
    except ImportError:
        print("ERROR: playwright not installed. Install with:")
        print("  pip install playwright")
        print("  playwright install")
        sys.exit(1)

    print(f"🌐 Launching browser automation (account: {account})...")
    print("⚠️  WARNING: This requires manual login or saved session cookies.")
    print("⚠️  Deep Think is web UI only for now (API in early access).")

    with sync_playwright() as playwright:
        # Chromium gives the best compatibility; headful while debugging.
        chromium = playwright.chromium.launch(headless=False)  # headless=True after debugging
        page = chromium.new_context().new_page()

        # Navigate to Gemini
        print("📍 Navigating to gemini.google.com...")
        page.goto("https://gemini.google.com/")

        # Wait for user to login if needed
        print("⏳ Waiting for Gemini to load (you may need to login manually)...")
        page.wait_for_load_state("networkidle")

        # TODO: Implement Deep Think selection
        # The UI may have changed, need to inspect current structure
        print("❌ BROWSER MODE NOT YET IMPLEMENTED")
        print("   Reason: Deep Think selection UI needs inspection")
        print("   Workaround: Use API mode or CLI mode instead")

        chromium.close()

        return {
            "error": "Browser mode not yet implemented. Use --mode api or --mode cli instead."
        }


def save_output(result: dict, output_path: str):
    """Save result to file as JSON plus a readable markdown twin.

    Args:
        result: Result dict from one of the mode functions (expects keys
            like 'model', 'elapsed_seconds', 'thinking', 'response', 'raw').
        output_path: Destination path; parent directories are created.
            A .md sibling with the same stem is written alongside it.
    """
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)

    # Save as JSON for structured data.
    # encoding="utf-8" is required: model output and the markdown headers
    # contain non-ASCII characters, and the platform default encoding
    # (e.g. cp1252 on Windows) would raise UnicodeEncodeError.
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(result, f, indent=2, ensure_ascii=False)

    # Also save markdown version for readability
    md_file = output_file.with_suffix(".md")
    with open(md_file, "w", encoding="utf-8") as f:
        f.write("# Gemini Deep Think Result\n\n")
        f.write(f"**Model**: {result.get('model', 'unknown')}\n")
        f.write(f"**Elapsed**: {result.get('elapsed_seconds', 0):.2f}s\n\n")

        if result.get("thinking"):
            f.write(f"## Thinking Process\n\n{result['thinking']}\n\n")

        f.write(f"## Response\n\n{result.get('response', result.get('raw', ''))}\n")

    print(f"💾 Saved to {output_file} and {md_file}")


def commit_to_memory(result: dict):
    """Commit a result dict to the Genesis memory systems.

    Formats the result as markdown and passes it to the Supermemory
    save script when that script exists; otherwise warns and skips.
    """
    print("🧠 Committing to Genesis memory...")

    # Save to Supermemory
    import subprocess

    memory_note = f"""# Gemini Deep Think Result

Model: {result.get('model', 'unknown')}
Elapsed: {result.get('elapsed_seconds', 0):.2f}s

## Thinking
{result.get('thinking', 'N/A')}

## Response
{result.get('response', result.get('raw', ''))}
"""

    script_path = Path(__file__).parent.parent / "mcp-servers" / "supermemory" / "save.sh"
    if not script_path.exists():
        print("⚠️  Supermemory save script not found, skipping")
        return

    subprocess.run([str(script_path), memory_note], check=True)
    print("✅ Saved to Supermemory (genesis-kinan collection)")


def main():
    """CLI entry point: parse arguments, dispatch to a mode, emit results.

    Returns 0 on success; exits with status 1 on argument or input errors.
    """
    parser = argparse.ArgumentParser(
        description="Genesis Deep Think Bridge - Connect Gemini thinking to Genesis",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # API mode (recommended)
  python deep_think_bridge.py --mode api --prompt "Design a memory architecture"

  # CLI mode
  python deep_think_bridge.py --mode cli --prompt "Analyze this code"

  # From file with CTM
  python deep_think_bridge.py --mode api --file prompt.txt --ctm

  # Custom model and thinking budget
  python deep_think_bridge.py --mode api --model gemini-2.5-pro --thinking-budget 10000 --prompt "Complex query"
        """
    )

    parser.add_argument("--mode", choices=["api", "cli", "browser"], default="api",
                       help="Execution mode (default: api)")
    parser.add_argument("--prompt", "-p", help="Prompt text")
    parser.add_argument("--file", "-f", help="Read prompt from file")
    parser.add_argument("--output", "-o", help="Save output to file")
    parser.add_argument("--ctm", action="store_true", help="Commit result to memory")
    parser.add_argument("--model", default="gemini-2.5-flash",
                       help="Model ID (default: gemini-2.5-flash)")
    parser.add_argument("--thinking-budget", type=int, default=8192,
                       help="Thinking token budget: up to 24576 (default: 8192)")

    args = parser.parse_args()

    # Get prompt: --file takes precedence over --prompt
    if args.file:
        prompt_file = Path(args.file)
        # Fail with a clean message instead of a FileNotFoundError traceback
        if not prompt_file.is_file():
            print(f"ERROR: Prompt file not found: {args.file}")
            sys.exit(1)
        prompt = prompt_file.read_text().strip()
    elif args.prompt:
        prompt = args.prompt
    else:
        print("ERROR: Must provide --prompt or --file")
        parser.print_help()
        sys.exit(1)

    # Execute based on mode
    if args.mode == "api":
        result = api_mode(prompt, model=args.model, thinking_budget=args.thinking_budget)
    elif args.mode == "cli":
        result = cli_mode(prompt, model=args.model)
    elif args.mode == "browser":
        result = browser_mode(prompt)

    # Print result (thinking first when present, then the response)
    print("\n" + "="*80)
    if result.get("thinking"):
        print("🧠 THINKING PROCESS:")
        print(result["thinking"])
        print("\n" + "-"*80 + "\n")

    print("💬 RESPONSE:")
    print(result.get("response", result.get("raw", "")))
    print("="*80 + "\n")

    # Save if requested
    if args.output:
        save_output(result, args.output)

    # CTM if requested
    if args.ctm:
        commit_to_memory(result)

    return 0


if __name__ == "__main__":
    sys.exit(main())
