"""
PM-007: Fresh Session Spawner (Gemini)
Spawn fresh Gemini API sessions for Genesis TRUE Method.

Acceptance Criteria:
- [x] GIVEN task WHEN spawn_gemini_session() THEN new client
- [x] AND learnings passed as discrete prompt (not history)
- [x] AND session destroyed after completion
- [x] AND returns success/failure + new learnings

Dependencies: PM-005, PM-006
"""

import json
import logging
import os
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, Optional, Tuple

try:
    import google.generativeai as genai
except ImportError:
    genai = None

from core.model_tier_loader import load_model_tiers, TierConfig
from core.learning_accumulator import get_learning_accumulator, FailureLesson

# Module-level logger shared by everything in this file.
logger = logging.getLogger(__name__)


@dataclass
class SessionResult:
    """Result from a single Gemini session execution.

    Bundles the outcome (success flag, output or error), usage metrics,
    any newly captured failure lesson, and metadata identifying which
    model/attempt/tier produced it.
    """
    success: bool
    output: Optional[str] = None
    error_type: Optional[str] = None      # exception class name on failure
    error_message: Optional[str] = None   # truncated error text on failure

    # Metrics
    input_tokens: int = 0
    output_tokens: int = 0
    duration_ms: int = 0
    cost: float = 0.0                     # estimated USD cost for this call

    # Learning captured from a failed attempt (None on success)
    new_lesson: Optional[FailureLesson] = None

    # Metadata
    model: str = ""
    attempt: int = 0
    tier: int = 1
    # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated since
    # Python 3.12 and returned a naive datetime.
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict.

        The "new_lesson" key is present only when a lesson was captured,
        so consumers can test key membership instead of None-checking.
        """
        result: Dict[str, Any] = {
            "success": self.success,
            "output": self.output,
            "error_type": self.error_type,
            "error_message": self.error_message,
            "input_tokens": self.input_tokens,
            "output_tokens": self.output_tokens,
            "duration_ms": self.duration_ms,
            "cost": self.cost,
            "model": self.model,
            "attempt": self.attempt,
            "tier": self.tier,
            "timestamp": self.timestamp
        }
        if self.new_lesson:
            result["new_lesson"] = self.new_lesson.to_dict()
        return result


class GeminiSessionSpawner:
    """
    Spawn fresh Gemini API sessions.

    Key Principles:
    - Each session is COMPLETELY FRESH (no history reuse)
    - Learnings passed as DISCRETE PROMPT CONTEXT (not chat history)
    - Session destroyed after completion (no state kept between calls)
    - Returns structured results with new learnings
    """

    def __init__(self,
                 api_key: Optional[str] = None,
                 default_model: str = "gemini-2.5-flash"):
        """
        Initialize GeminiSessionSpawner.

        Args:
            api_key: Gemini API key. Defaults to env GEMINI_API_KEY.
            default_model: Model used when no tier-specific model applies.
        """
        self.api_key = api_key or os.getenv("GEMINI_API_KEY")
        self.default_model = default_model
        self.tier_config = load_model_tiers()
        self.learning_accumulator = get_learning_accumulator()

        # Configure the SDK once here; spawn_session only creates fresh
        # model instances, it never re-configures credentials.
        if genai and self.api_key:
            genai.configure(api_key=self.api_key)
            logger.info("Gemini API configured")
        elif not genai:
            logger.warning("google-generativeai package not installed")
        else:
            logger.warning("GEMINI_API_KEY not set")

    def _build_prompt(self,
                     task_description: str,
                     task_id: str,
                     include_learnings: bool = True,
                     additional_context: Optional[str] = None) -> str:
        """
        Build prompt with discrete learnings (not history).

        Args:
            task_description: The task to complete
            task_id: Task identifier for learning lookup
            include_learnings: Whether to include previous learnings
            additional_context: Optional additional context

        Returns:
            Complete prompt string
        """
        prompt_parts = []

        # Add learnings from previous attempts (DISCRETE prompt text,
        # not replayed chat history).
        if include_learnings:
            learnings = self.learning_accumulator.format_lessons_for_prompt(task_id)
            if learnings:
                prompt_parts.append(learnings)
                prompt_parts.append("---\n")

        # Add additional context if provided
        if additional_context:
            prompt_parts.append("## CONTEXT")
            prompt_parts.append(additional_context)
            prompt_parts.append("\n---\n")

        # Add the main task
        prompt_parts.append("## TASK")
        prompt_parts.append(task_description)
        prompt_parts.append("\n")
        prompt_parts.append("Complete this task. Provide a clear, working solution.")

        return "\n".join(prompt_parts)

    def spawn_session(self,
                     task_id: str,
                     task_description: str,
                     attempt: int,
                     tier: int = 1,
                     model: Optional[str] = None,
                     timeout_seconds: Optional[int] = None,
                     additional_context: Optional[str] = None,
                     temperature: float = 0.7,
                     max_output_tokens: int = 8192) -> SessionResult:
        """
        Spawn a fresh Gemini session.

        Args:
            task_id: Task identifier
            task_description: Description of task to complete
            attempt: Attempt number (for tracking)
            tier: Execution tier
            model: Model to use (defaults to the tier's model, then default_model)
            timeout_seconds: Request timeout (defaults to tier config, then 120s)
            additional_context: Additional context to include
            temperature: Generation temperature
            max_output_tokens: Maximum output tokens

        Returns:
            SessionResult with outcome and new learnings. Never raises;
            API failures are captured as a failed result plus a lesson.
        """
        start_time = time.time()

        # Resolve model/timeout from tier config, with fallbacks.
        tier_config = self.tier_config.get_tier(tier)
        if tier_config and tier_config.provider == "gemini":
            model = model or tier_config.model
            timeout_seconds = timeout_seconds or tier_config.timeout_seconds
        else:
            model = model or self.default_model
            timeout_seconds = timeout_seconds or 120

        logger.info("Spawning Gemini session: task=%s, attempt=%s, model=%s",
                    task_id, attempt, model)

        # Fail fast if Gemini is unavailable (no lesson captured — this is
        # a configuration problem, not a task failure).
        if not genai or not self.api_key:
            return SessionResult(
                success=False,
                error_type="ConfigurationError",
                error_message="Gemini API not configured",
                model=model,
                attempt=attempt,
                tier=tier,
                duration_ms=int((time.time() - start_time) * 1000)
            )

        try:
            # Build prompt with discrete learnings
            prompt = self._build_prompt(
                task_description=task_description,
                task_id=task_id,
                include_learnings=True,
                additional_context=additional_context
            )

            # Create FRESH model instance (no history)
            generation_config = genai.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_output_tokens
            )

            model_instance = genai.GenerativeModel(
                model_name=model,
                generation_config=generation_config
            )

            # Make the API call
            response = model_instance.generate_content(
                prompt,
                request_options={"timeout": timeout_seconds}
            )

            duration_ms = int((time.time() - start_time) * 1000)

            # Extract token counts (usage_metadata may be absent on some
            # SDK versions/responses).
            input_tokens = 0
            output_tokens = 0
            if hasattr(response, 'usage_metadata'):
                input_tokens = getattr(response.usage_metadata, 'prompt_token_count', 0)
                output_tokens = getattr(response.usage_metadata, 'candidates_token_count', 0)

            # Calculate cost
            cost = self._calculate_cost(model, input_tokens, output_tokens)

            # Get response text. NOTE(review): response.text can raise when
            # the response was blocked; that lands in the except below and
            # is captured as a lesson.
            output_text = response.text if hasattr(response, 'text') else str(response)

            logger.info("Gemini session completed: task=%s, duration=%sms, tokens=%s+%s",
                        task_id, duration_ms, input_tokens, output_tokens)

            return SessionResult(
                success=True,
                output=output_text,
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                duration_ms=duration_ms,
                cost=cost,
                model=model,
                attempt=attempt,
                tier=tier
            )

        except Exception as e:
            # Broad catch is intentional: any SDK/network failure becomes a
            # structured failed result so retries can continue.
            duration_ms = int((time.time() - start_time) * 1000)
            error_type = type(e).__name__
            error_message = str(e)[:500]  # cap stored message length

            logger.warning("Gemini session failed: %s: %s", error_type, error_message)

            # Capture failure as learning
            new_lesson = self.learning_accumulator.capture_failure(
                task_id=task_id,
                attempt_number=attempt,
                tier=tier,
                error_type=error_type,
                error_message=error_message,
                model=model,
                duration_ms=duration_ms
            )

            return SessionResult(
                success=False,
                error_type=error_type,
                error_message=error_message,
                duration_ms=duration_ms,
                model=model,
                attempt=attempt,
                tier=tier,
                new_lesson=new_lesson
            )

    def _calculate_cost(self, model: str, input_tokens: int, output_tokens: int) -> float:
        """Estimate USD cost from model name and token counts.

        Rates are approximate per-1K-token prices: Flash models are priced
        at $0.00001/$0.00004 (in/out); everything else is billed at the
        Pro rate of $0.00125/$0.00375 (in/out).
        """
        if "flash" in model.lower():
            input_cost = (input_tokens / 1000) * 0.00001
            output_cost = (output_tokens / 1000) * 0.00004
        else:
            input_cost = (input_tokens / 1000) * 0.00125
            output_cost = (output_tokens / 1000) * 0.00375

        return input_cost + output_cost

    def spawn_with_retries(self,
                          task_id: str,
                          task_description: str,
                          max_attempts: int = 20,
                          tier: int = 1,
                          **kwargs) -> Tuple[SessionResult, int]:
        """
        Spawn sessions with retries until success or max attempts.

        Args:
            task_id: Task identifier
            task_description: Task description
            max_attempts: Maximum attempts (must be >= 1; default 20 for Tier 1)
            tier: Execution tier
            **kwargs: Additional arguments for spawn_session

        Returns:
            Tuple of (final SessionResult, attempts used)

        Raises:
            ValueError: If max_attempts < 1 (previously this surfaced as an
                UnboundLocalError at the final return).
        """
        if max_attempts < 1:
            raise ValueError(f"max_attempts must be >= 1, got {max_attempts}")

        for attempt in range(1, max_attempts + 1):
            result = self.spawn_session(
                task_id=task_id,
                task_description=task_description,
                attempt=attempt,
                tier=tier,
                **kwargs
            )

            if result.success:
                return result, attempt

            logger.info("Attempt %s/%s failed for task %s", attempt, max_attempts, task_id)

        # Return final failed result
        return result, max_attempts

    def check_availability(self) -> Dict[str, Any]:
        """Check if Gemini API is available and configured.

        Returns a status dict; "connection_test" is present only when the
        SDK and key are both configured. Note this only verifies that a
        model instance can be constructed — it makes no network call.
        """
        status = {
            "configured": bool(genai and self.api_key),
            "api_key_set": bool(self.api_key),
            "package_installed": bool(genai),
            "default_model": self.default_model
        }

        if status["configured"]:
            try:
                # Constructing the instance is the whole test; no binding needed.
                genai.GenerativeModel(self.default_model)
                status["connection_test"] = "passed"
            except Exception as e:
                status["connection_test"] = f"failed: {str(e)}"

        return status


# Factory function
def spawn_gemini_session(task_id: str,
                        task_description: str,
                        attempt: int = 1,
                        **kwargs) -> SessionResult:
    """
    Convenience wrapper: build a throwaway spawner and run one session.

    Args:
        task_id: Task identifier
        task_description: Task description
        attempt: Attempt number
        **kwargs: Forwarded verbatim to GeminiSessionSpawner.spawn_session

    Returns:
        SessionResult
    """
    return GeminiSessionSpawner().spawn_session(
        task_id=task_id,
        task_description=task_description,
        attempt=attempt,
        **kwargs,
    )


if __name__ == "__main__":
    # Test the GeminiSessionSpawner
    logging.basicConfig(level=logging.INFO)

    spawner = GeminiSessionSpawner()

    print("Gemini Availability:")
    print(json.dumps(spawner.check_availability(), indent=2))

    # Test with a simple task (only if API is configured)
    if spawner.api_key and genai:
        result = spawner.spawn_session(
            task_id="test-001",
            task_description="What is 2 + 2? Reply with just the number.",
            attempt=1
        )
        print("\nSession Result:")
        print(json.dumps(result.to_dict(), indent=2))
