"""
Execution Bridge MCP Tools
============================
Live execution bridge: Claude decomposes tasks, dispatches to Gemini swarm,
gets results back. Bridges MCP protocol to GenesisExecutionLayer.

Tools:
- exec_decompose: Break task into RWL stories
- exec_story: Execute single story via Gemini
- exec_swarm: Parallel execution of multiple stories
- exec_status: Rate maximizer utilization + execution stats
"""

import asyncio
import json
import logging
import sys
from dataclasses import asdict
from datetime import datetime, timezone
from pathlib import Path

# Module-level logger, stdlib convention: named after this module.
logger = logging.getLogger(__name__)

# Paths
# Shared sync directory used to exchange events/results with other processes.
SYNC_DIR = Path("/mnt/e/genesis-system/data/antigravity-sync")
# Append-only event stream: one JSON object per line (JSONL).
EVENTS_FILE = SYNC_DIR / "events.jsonl"
# One JSON file per execution result, named by execution id.
EXECUTIONS_DIR = SYNC_DIR / "executions"

# Lazy loaded execution layer
# Cache for the GenesisExecutionLayer singleton; filled on first use by
# _get_execution_layer() and left as None while the layer is unavailable.
_execution_layer = None


def _get_execution_layer():
    """Return the cached GenesisExecutionLayer, importing it on first use.

    Returns None (after logging an error) when the layer cannot be imported;
    later calls will retry the import.
    """
    global _execution_layer
    if _execution_layer is not None:
        return _execution_layer
    try:
        # Make the genesis-system package importable, then fetch the singleton.
        sys.path.insert(0, '/mnt/e/genesis-system')
        from core.genesis_execution_layer import get_execution_layer
        _execution_layer = get_execution_layer()
    except Exception as e:
        logger.error(f"GenesisExecutionLayer unavailable: {e}")
    return _execution_layer


def _log_event(event_type: str, data: dict):
    """Append a structured event to the shared events.jsonl stream.

    Args:
        event_type: Short event name, e.g. "decomposition" or "swarm_executed".
        data: JSON-serializable payload describing the event.

    Best-effort: failures are logged and swallowed so event logging can never
    break the calling tool.
    """
    try:
        event = {
            # datetime.utcnow() is deprecated; use the timezone-aware clock
            # but strip tzinfo so the timestamp string written to events.jsonl
            # keeps its existing naive-UTC format.
            "timestamp": datetime.now(timezone.utc).replace(tzinfo=None).isoformat(),
            "source": "execution_bridge",
            "type": event_type,
            "data": data
        }
        EVENTS_FILE.parent.mkdir(parents=True, exist_ok=True)
        # Explicit encoding: the platform default may not be UTF-8.
        with open(EVENTS_FILE, "a", encoding="utf-8") as f:
            f.write(json.dumps(event) + "\n")
    except Exception as e:
        logger.error(f"Failed to log event: {e}")


def _save_execution(execution_id: str, data: dict):
    """Persist one execution result as <EXECUTIONS_DIR>/<execution_id>.json.

    Args:
        execution_id: Unique basename for the result file (no extension).
        data: Result payload; non-JSON-native values are stringified
              via default=str.

    Best-effort: failures are logged, never raised.
    """
    try:
        EXECUTIONS_DIR.mkdir(parents=True, exist_ok=True)
        filepath = EXECUTIONS_DIR / f"{execution_id}.json"
        # Explicit encoding: the platform default may not be UTF-8.
        with open(filepath, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2, default=str)
    except Exception as e:
        logger.error(f"Failed to save execution: {e}")


def exec_decompose(task: str, context: str = "") -> str:
    """
    Break a task into RWL stories using Gemini.

    Args:
        task: Task description to decompose
        context: Optional context for the decomposition

    Returns:
        JSON object with "stories" (each carrying id, title, description,
        acceptance_criteria, priority), "count", and the truncated "task".
        On failure: JSON with an "error" key, plus a single-story "fallback"
        when the execution layer is offline.
    """
    layer = _get_execution_layer()
    if not layer:
        # Offline fallback: wrap the whole task as one manually-verified story
        # so callers always receive a usable story list.
        return json.dumps({
            "error": "GenesisExecutionLayer not available",
            "fallback": [{
                "id": "STORY-001",
                "title": task[:100],
                "description": task,
                "acceptance_criteria": [{"description": "Task completed", "verification": "manual"}],
                "priority": 1
            }]
        })

    try:
        stories = layer.decompose_to_stories(task, context)

        # Project Story objects down to plain JSON-serializable dicts.
        stories_data = [
            {
                "id": s.id,
                "title": s.title,
                "description": s.description,
                "acceptance_criteria": s.acceptance_criteria,
                "priority": s.priority
            }
            for s in stories
        ]

        _log_event("decomposition", {
            "task": task[:200],  # truncated: events are summaries, not payloads
            "stories_count": len(stories_data)
        })

        return json.dumps({
            "stories": stories_data,
            "count": len(stories_data),
            "task": task[:200]
        })

    except Exception as e:
        logger.error(f"Decomposition failed: {e}")
        return json.dumps({"error": str(e)})


def exec_story(story_json: str) -> str:
    """
    Execute a single RWL story via Gemini.

    Args:
        story_json: JSON with story fields: id, title, description,
                    acceptance_criteria, priority

    Returns:
        JSON with execution result including passes, result, iterations;
        on failure, JSON with an "error" key.
    """
    try:
        story_data = json.loads(story_json)
    except json.JSONDecodeError as e:
        return json.dumps({"error": f"Invalid JSON: {e}"})

    layer = _get_execution_layer()
    if not layer:
        return json.dumps({"error": "GenesisExecutionLayer not available"})

    try:
        from core.genesis_execution_layer import Story

        # Missing fields fall back to safe defaults rather than raising.
        story = Story(
            id=story_data.get("id", "STORY-001"),
            title=story_data.get("title", ""),
            description=story_data.get("description", ""),
            acceptance_criteria=story_data.get("acceptance_criteria", []),
            priority=story_data.get("priority", 5)
        )

        executed = layer.execute_story(story)

        result = {
            "id": executed.id,
            "title": executed.title,
            "passes": executed.passes,
            "iterations": executed.iterations,
            "result": executed.result,
            "error": executed.error,
            "completed_at": executed.completed_at
        }

        # Timezone-aware clock (datetime.utcnow() is deprecated); strftime
        # output is identical for UTC.
        execution_id = f"story_{executed.id}_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}"
        _save_execution(execution_id, result)
        _log_event("story_executed", {
            "story_id": executed.id,
            "passes": executed.passes,
            "iterations": executed.iterations
        })

        # default=str: completed_at (and result payloads) may hold types json
        # cannot serialize natively; _save_execution already stringifies them,
        # so the returned JSON must not raise where the saved file did not.
        return json.dumps(result, default=str)

    except Exception as e:
        logger.error(f"Story execution failed: {e}")
        return json.dumps({"error": str(e)})


def exec_swarm(stories_json: str, max_parallel: int = 3) -> str:
    """
    Execute multiple stories in parallel via Gemini swarm.

    Args:
        stories_json: JSON array of story objects
        max_parallel: Max concurrent Gemini agents (default 3)

    Returns:
        JSON with aggregate results: stories_completed, stories_failed,
        total_tokens, total_cost, elapsed_seconds; on failure, JSON with
        an "error" key.
    """
    try:
        stories_data = json.loads(stories_json)
        if not isinstance(stories_data, list):
            return json.dumps({"error": "stories_json must be a JSON array"})
    except json.JSONDecodeError as e:
        return json.dumps({"error": f"Invalid JSON: {e}"})

    layer = _get_execution_layer()
    if not layer:
        return json.dumps({"error": "GenesisExecutionLayer not available"})

    try:
        from core.genesis_execution_layer import Story

        stories = [
            Story(
                id=s.get("id", f"STORY-{i+1:03d}"),  # 1-based fallback ids
                title=s.get("title", ""),
                description=s.get("description", ""),
                acceptance_criteria=s.get("acceptance_criteria", []),
                priority=s.get("priority", 5)
            )
            for i, s in enumerate(stories_data)
        ]

        # asyncio.run() creates a fresh event loop, runs the coroutine, and
        # guarantees cleanup (cancelling leftover tasks, shutting down async
        # generators, closing the loop) -- the modern replacement for the
        # manual new_event_loop()/run_until_complete()/close() dance.
        result = asyncio.run(
            layer.execute_rwl_swarm(stories, max_parallel=max_parallel)
        )

        result_data = {
            "success": result.success,
            "mode": result.mode.value,
            "stories_completed": result.stories_completed,
            "stories_failed": result.stories_failed,
            "total_tokens": result.total_tokens,
            "total_cost": result.total_cost,
            "elapsed_seconds": result.elapsed_seconds,
            "errors": result.errors
        }

        # Timezone-aware clock (datetime.utcnow() is deprecated); strftime
        # output is identical for UTC.
        execution_id = f"swarm_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}"
        _save_execution(execution_id, result_data)
        _log_event("swarm_executed", {
            "stories_count": len(stories),
            "completed": result.stories_completed,
            "failed": result.stories_failed,
            "elapsed": result.elapsed_seconds
        })

        # default=str keeps the response serializable even if errors holds
        # exception objects or other non-JSON-native values, matching how
        # _save_execution serializes the same payload.
        return json.dumps(result_data, default=str)

    except Exception as e:
        logger.error(f"Swarm execution failed: {e}")
        return json.dumps({"error": str(e)})


def exec_status() -> str:
    """
    Get execution layer status including rate maximizer utilization.

    Returns:
        JSON with execution stats, rate limit utilization, best model,
        and recommendations
    """
    layer = _get_execution_layer()
    if not layer:
        return json.dumps({
            "error": "GenesisExecutionLayer not available",
            "status": "offline"
        })

    try:
        raw = layer.get_status()

        # Copy fields into a plain dict, coercing anything that may not be
        # JSON-native (best_model in particular) to safe types.
        payload = {
            "initialized": raw.get("initialized", False),
            "total_executions": raw.get("total_executions", 0),
            "total_tokens": raw.get("total_tokens", 0),
            "total_cost": raw.get("total_cost", 0.0),
            "best_model": str(raw.get("best_model", "unknown")),
            "recommendations": raw.get("recommendations", [])
        }

        # Utilization is optional and of unknown type: include it only when
        # present, and null it out when it cannot be read as a float.
        utilization = raw.get("rate_limit_utilization")
        if utilization is not None:
            try:
                payload["rate_limit_utilization"] = float(utilization)
            except (TypeError, ValueError):
                payload["rate_limit_utilization"] = None

        return json.dumps(payload, default=str)

    except Exception as e:
        logger.error(f"Status check failed: {e}")
        return json.dumps({"error": str(e), "status": "error"})
