"""
PROMETHEUS — Capabilities & New Tech General
=============================================
Fires evaluation agents to assess and integrate new capabilities, MCPs, and models.

Usage:
    from core.generals.prometheus import spawn_prometheus
    results = spawn_prometheus("Evaluate Gemini 2.5 Flash vs MiniMax for coding tasks")
"""

import json
import uuid
from datetime import datetime, timezone
from pathlib import Path

# Absolute root of the Genesis checkout; every swarm artifact lives under it.
# NOTE(review): hard-coded WSL-style mount path — confirm it matches the deploy host.
REPO_ROOT = Path("/mnt/e/genesis-system")
# One JSON state file per swarm (<swarm_id>.json) is written here — see _save_state().
SWARM_PROGRESS_DIR = REPO_ROOT / "data" / "swarm_progress"
# Each swarm gets a subdirectory here holding per-agent prompt and output files.
HIVE_PROGRESS_DIR = REPO_ROOT / "hive" / "progress"

# Prompt template for capability/model evaluation agents.
# Placeholders filled by spawn_prometheus(): {capability}, {agent_index},
# {total_agents}, {dimension}, {output_file}.
PROMETHEUS_EVAL_PROMPT = """You are PROMETHEUS, the Capabilities & New Tech evaluator for Genesis.

Capability to evaluate: {capability}
Evaluation dimension {agent_index} of {total_agents}: {dimension}

Instructions:
1. Research this capability/model/tool thoroughly
2. Test it against Genesis use cases (ReceptionistAI, voice widget, GHL integration)
3. Benchmark against current Genesis stack
4. Identify integration path (native? custom code? MCP?)
5. Write capability card to: {output_file}

Capability Card format:
## Capability: {capability}
### Dimension: {dimension}
### Summary
### Performance vs Current Stack
### Integration Complexity (1-5 scale)
### Cost Impact ($USD/month estimate)
### Recommendation: ADOPT / TRIAL / REJECT
### Integration Path
### Next Steps
"""

# Prompt template used instead of the eval prompt when the task is classified
# as an MCP build. Placeholders: {capability}, {agent_index}, {total_agents},
# {component}, {output_file}.
PROMETHEUS_MCP_BUILD_PROMPT = """You are PROMETHEUS, building a new MCP server for Genesis.

MCP to build: {capability}
Component {agent_index} of {total_agents}: {component}

Instructions:
1. Read existing MCP servers in /mnt/e/genesis-system/mcp-servers/ for patterns
2. Build this MCP component following the standard structure
3. Write to: {output_file}
4. Include: manifest.json, main handler, tool definitions, README

MCP Structure:
- index.js (or main.py) — tool handlers
- manifest.json — tool definitions
- README.md — usage instructions
"""

# Benchmark-runner prompt template.
# NOTE(review): not referenced anywhere in this file — confirm it is used by
# another module, or wire it into spawn_prometheus for the "model" task type.
CAPABILITY_BENCHMARK_PROMPT = """You are a PROMETHEUS benchmark runner.

Model under test: {capability}
Benchmark suite {agent_index} of {total_agents}: {dimension}

Run these Genesis-specific benchmarks:
1. Code generation: Write a FastAPI endpoint with Pydantic models
2. Reasoning: Solve a multi-step orchestration problem
3. Context handling: Summarize a 10K token session transcript
4. Cost efficiency: Tokens used vs output quality
5. Speed: Time-to-first-token

Write results to: {output_file}
"""

# Evaluation dimensions by capability type.
# spawn_prometheus() picks one of these lists by task_type ("model" / "mcp" /
# "capability") and truncates it to agent_count entries — each entry becomes
# one agent's evaluation dimension, so at most 5 agents per task type.
EVAL_DIMENSIONS = {
    "model": [
        "Code generation quality vs MiniMax and Claude Haiku",
        "Reasoning depth vs Gemini 2.5 Flash",
        "Context window utilization (1M+ tokens)",
        "Cost per task vs current Genesis stack",
        "Speed and latency benchmarks",
    ],
    "mcp": [
        "Tool capability coverage",
        "Integration complexity with Genesis",
        "Security and sandboxing",
        "Performance and rate limits",
        "Documentation quality",
    ],
    "capability": [
        "Use case fit for Genesis missions",
        "Integration path and complexity",
        "Cost impact analysis",
        "Risk assessment",
        "Implementation roadmap",
    ],
}


def spawn_prometheus(capability_to_evaluate: str, agent_count: int = 3) -> dict:
    """
    Spawn evaluation agents for a new capability, model, or MCP.

    Classifies the request by keyword, selects evaluation dimensions and a
    model tier, writes one prompt file per agent under the swarm's output
    directory, and persists swarm state as JSON. Framework only — agents are
    queued (printed), not launched.

    Parameters
    ----------
    capability_to_evaluate : str
        What to evaluate (model name, MCP name, capability description)
    agent_count : int
        Number of parallel evaluation agents (default 3). Capped at the
        number of dimensions available for the detected task type (max 5).

    Returns
    -------
    dict with swarm metadata (swarm_id, general, mission, model, task_type,
    agent_count, status, started_at, output_dir, ...)

    Raises
    ------
    ValueError
        If agent_count is less than 1 (previously this silently created a
        "running" swarm with zero agents).
    """
    if agent_count < 1:
        raise ValueError(f"agent_count must be >= 1, got {agent_count}")

    SWARM_PROGRESS_DIR.mkdir(parents=True, exist_ok=True)
    HIVE_PROGRESS_DIR.mkdir(parents=True, exist_ok=True)

    swarm_id = f"prometheus_{uuid.uuid4().hex[:8]}"
    output_dir = HIVE_PROGRESS_DIR / swarm_id
    output_dir.mkdir(parents=True, exist_ok=True)

    # Keyword-based classification: MCP/build requests win over model
    # evaluation, which wins over the generic "capability" track.
    cap_lower = capability_to_evaluate.lower()
    is_mcp = "mcp" in cap_lower or "server" in cap_lower or "build" in cap_lower
    is_model = any(kw in cap_lower for kw in ["model", "gemini", "claude", "minimax", "deepseek", "kimi"])
    task_type = "mcp" if is_mcp else ("model" if is_model else "capability")
    # Building MCP code gets the stronger (Sonnet) model; pure evaluation
    # runs on the cheaper Haiku tier.
    model = "anthropic/claude-sonnet-4-5" if is_mcp else "anthropic/claude-haiku-4-5"

    dimensions = EVAL_DIMENSIONS.get(task_type, EVAL_DIMENSIONS["capability"])[:agent_count]
    prompt_template = PROMETHEUS_MCP_BUILD_PROMPT if is_mcp else PROMETHEUS_EVAL_PROMPT

    swarm_state = {
        "swarm_id": swarm_id,
        "general": "PROMETHEUS",
        "mission": f"Evaluate: {capability_to_evaluate[:80]}",
        "model": model,
        "task_type": task_type,
        "agent_count": len(dimensions),  # may be < agent_count after capping
        "stories_completed": 0,
        "stories_completed_this_hour": 0,
        "status": "running",
        "started_at": datetime.now(timezone.utc).isoformat(),
        "output_dir": str(output_dir),
    }

    state_file = SWARM_PROGRESS_DIR / f"{swarm_id}.json"

    for i, dimension in enumerate(dimensions):
        output_file = output_dir / f"eval_{i+1:02d}_output.md"
        # Both {dimension} and {component} are supplied; each template uses
        # whichever it declares (str.format ignores extra keyword arguments).
        prompt = prompt_template.format(
            capability=capability_to_evaluate,
            agent_index=i + 1,
            total_agents=len(dimensions),
            dimension=dimension,
            component=dimension,
            output_file=str(output_file),
        )
        # Explicit UTF-8: prompt content must not depend on locale encoding.
        (output_dir / f"eval_{i+1:02d}_prompt.md").write_text(prompt, encoding="utf-8")

        # NOTE: Framework only — no live API calls.
        # Live: subprocess.Popen(["claude", "-p", prompt, "--output", str(output_file)])
        print(f"[PROMETHEUS] Agent {i+1}/{len(dimensions)} queued: {dimension[:60]}")

    _save_state(state_file, swarm_state)
    print(f"[PROMETHEUS] Swarm {swarm_id} — {len(dimensions)} evaluators for: {capability_to_evaluate[:60]}")
    return swarm_state


def spawn(capability_to_evaluate: str) -> dict:
    """Uniform entry point used by the genesis_hive.py router.

    Delegates to :func:`spawn_prometheus` with the default agent count and
    returns its swarm-metadata dict unchanged.
    """
    swarm_state = spawn_prometheus(capability_to_evaluate)
    return swarm_state


def _save_state(path: Path, state: dict) -> None:
    path.write_text(json.dumps(state, indent=2, default=str))
