#!/usr/bin/env python3
"""
OpenRouter Swarm Orchestrator - Mission-Driven Edition
Executes 102 atomic stories from SWARM_MISSIONS.md across MiniMax M2.5 and Kimi K2.5 models.

USAGE:
    python openrouter_swarm.py                    # Run all stories (auto-routed)
    python openrouter_swarm.py --team minimax     # Run only MiniMax stories
    python openrouter_swarm.py --team kimi        # Run only Kimi stories
    python openrouter_swarm.py --limit 10         # Run first 10 stories only
    python openrouter_swarm.py --skip-completed   # Skip already-completed stories (reads existing JSONL files)

    IMPORTANT: Run with PYTHONUNBUFFERED=1 to see real-time output:
        PYTHONUNBUFFERED=1 python openrouter_swarm.py --skip-completed

OUTPUTS:
    /mnt/e/genesis-system/hive/swarm_results/minimax_results.jsonl
    /mnt/e/genesis-system/hive/swarm_results/kimi_results.jsonl
    /mnt/e/genesis-system/hive/SWARM_METRICS_REPORT.md

Author: Genesis System
Date: 2026-02-15
"""

import os
import sys
import json
import asyncio
import argparse
import re
from datetime import datetime
from typing import List, Dict, Any, Optional
from dataclasses import dataclass, asdict
from pathlib import Path
import time

# Add Genesis to path
sys.path.append('/mnt/e/genesis-system')

try:
    import aiohttp
except ImportError:
    print("ERROR: aiohttp not installed. Install with: pip install aiohttp")
    sys.exit(1)


# Configuration
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1/chat/completions"  # OpenRouter chat-completions endpoint
MISSIONS_FILE = "/mnt/e/genesis-system/hive/SWARM_MISSIONS.md"  # source of atomic stories

# Model IDs (OpenRouter routing slugs)
MINIMAX_MODEL_ID = "minimax/minimax-m2.5"
KIMI_MODEL_ID = "moonshotai/kimi-k2.5"

# Batch size for parallel execution (reduced for Kimi to avoid rate limits)
BATCH_SIZE_MINIMAX = 20
BATCH_SIZE_KIMI = 5  # Smaller batches for Kimi to prevent rate limiting

# Request timeout settings (prevent indefinite hangs)
REQUEST_TIMEOUT_SECONDS = 300  # Per-request timeout (5 minutes)
BATCH_TIMEOUT_SECONDS = 600    # Per-batch timeout (10 minutes)

# Output paths (JSONL files hold one serialized AgentResult per line)
RESULTS_DIR = "/mnt/e/genesis-system/hive/swarm_results"
MINIMAX_RESULTS_FILE = f"{RESULTS_DIR}/minimax_results.jsonl"
KIMI_RESULTS_FILE = f"{RESULTS_DIR}/kimi_results.jsonl"
REPORT_FILE = "/mnt/e/genesis-system/hive/SWARM_METRICS_REPORT.md"


@dataclass
class Story:
    """One atomic story parsed from SWARM_MISSIONS.md."""
    story_id: str
    title: str
    role: str
    need: str
    benefit: str
    acceptance_criteria: List[str]
    black_box_tests: str
    white_box_tests: str
    estimated_tokens: int
    model: str
    prd_name: str

    def to_task_description(self) -> str:
        """Render this story as the markdown task brief sent to the agent."""
        # Build the bullet list up front; f-string expressions cannot
        # contain the newline literal directly on older Python versions.
        criteria_block = "\n".join("- " + item for item in self.acceptance_criteria)
        return f"""# Story: {self.story_id} - {self.title}

**User Story:**
As a {self.role}, I need {self.need}, so that {self.benefit}.

**Acceptance Criteria:**
{criteria_block}

**Black Box Tests:**
{self.black_box_tests}

**White Box Tests:**
{self.white_box_tests}

**Instructions:**
Implement this story following the acceptance criteria. Provide production-ready code/content with inline comments, error handling, and best practices. Include example usage or test scenarios where applicable.
"""

    def to_system_prompt(self) -> str:
        """Choose the system persona: coding for Kimi stories, content otherwise."""
        coding = "You are an expert software engineer building production-grade code. Write clean, well-documented, type-hinted Python code with comprehensive error handling. Follow PEP 8 and best practices."
        content = "You are an expert content strategist and technical writer. Create clear, compelling, accurate content that drives user engagement and conversion. Be precise, actionable, and authentic."
        return coding if self.model.lower() == "kimi" else content


@dataclass
class AgentTask:
    """A unit of work: one story assigned to one numbered agent on a team."""
    agent_id: int
    team: str
    story: Story

    @property
    def task_description(self) -> str:
        """Markdown task brief, delegated to the underlying story."""
        return self.story.to_task_description()

    @property
    def system_prompt(self) -> str:
        """System persona, delegated to the underlying story's model routing."""
        return self.story.to_system_prompt()


@dataclass
class AgentResult:
    """Represents the result of an agent execution.

    Serialized to JSONL via dataclasses.asdict(); the field names are the
    on-disk schema, so they must stay stable across runs.
    """
    agent_id: int
    team: str  # "MINIMAX" or "KIMI"
    model: str  # OpenRouter model ID used for this execution
    story_id: str
    story_title: str
    prd_name: str
    status: str  # success/fail
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    cost_usd: float
    response_time_ms: int
    response: Optional[str] = None  # model output text (success only)
    error: Optional[str] = None  # short error description (fail only)
    timestamp: Optional[str] = None  # ISO-8601; auto-filled in __post_init__

    def __post_init__(self):
        # Default the timestamp to "now" so callers never have to pass it.
        if self.timestamp is None:
            self.timestamp = datetime.now().isoformat()


def parse_missions_file(file_path: str) -> List[Story]:
    """Parse SWARM_MISSIONS.md and extract all atomic stories.

    The file is scanned line by line:
      - ``# PRD n: NAME`` headers update the current PRD context.
      - ``####``-prefixed headers open a new story section (and close any
        story still in progress).
      - A ``---`` divider immediately after the ``**Model**:`` line closes
        the current story.

    Args:
        file_path: Path to the missions markdown file.

    Returns:
        All stories that parsed successfully; sections that fail to parse
        are skipped (parse_single_story prints a warning for them).

    Exits the process with status 1 if the file does not exist.
    """
    stories = []

    if not os.path.exists(file_path):
        print(f"ERROR: Missions file not found: {file_path}")
        sys.exit(1)

    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()

    # PRD header format: # PRD 1: SUNAIVA MEMORY VAULT (Ship in 6hrs)
    # Captures the PRD name up to (but excluding) the first parenthesis.
    prd_pattern = re.compile(r'^# PRD \d+:\s+([^\(]+)', re.MULTILINE)

    current_prd = "Unknown PRD"
    lines = content.split('\n')
    current_story_text = []
    in_story = False

    for i, line in enumerate(lines):
        # Track PRD context so each story records which PRD it belongs to.
        prd_match = prd_pattern.match(line)
        if prd_match:
            current_prd = prd_match.group(1)

        if line.startswith('####'):
            # A new story header also terminates any story still open.
            if in_story and current_story_text:
                parsed_story = parse_single_story('\n'.join(current_story_text), current_prd)
                if parsed_story:
                    stories.append(parsed_story)

            # Start accumulating the new story's lines.
            current_story_text = [line]
            in_story = True
        elif in_story:
            current_story_text.append(line)

            # A '---' divider directly after the **Model**: line ends the story.
            if line.strip().startswith('---') and i > 0 and lines[i-1].strip().startswith('**Model**:'):
                parsed_story = parse_single_story('\n'.join(current_story_text), current_prd)
                if parsed_story:
                    stories.append(parsed_story)
                current_story_text = []
                in_story = False

    # Flush the final story if the file doesn't end with a divider.
    if in_story and current_story_text:
        parsed_story = parse_single_story('\n'.join(current_story_text), current_prd)
        if parsed_story:
            stories.append(parsed_story)

    print(f"✅ Parsed {len(stories)} stories from {file_path}")
    return stories


def parse_single_story(story_text: str, prd_name: str) -> Optional[Story]:
    """Parse one story's markdown section into a Story, or None on failure."""
    try:
        # Header line: #### STORY-ID: Title
        header = re.search(r'####\s+([\w-]+):\s+(.+)', story_text)
        if not header:
            return None

        # User-story sentence: **As a** X, **I need** Y, **so that** Z.
        role_m = re.search(r'\*\*As a\*\*\s+(.+?),', story_text)
        need_m = re.search(r'\*\*I need\*\*\s+(.+?),\s+\*\*so that\*\*', story_text)
        benefit_m = re.search(r'\*\*so that\*\*\s+(.+?)\.', story_text)
        if not (role_m and need_m and benefit_m):
            return None

        # Bullet list between the criteria header and the black-box header.
        criteria = []
        criteria_m = re.search(
            r'\*\*Acceptance Criteria\*\*:\n(.*?)\*\*Black Box Tests\*\*:',
            story_text, re.DOTALL)
        if criteria_m:
            for raw in criteria_m.group(1).split('\n'):
                raw = raw.strip()
                if raw.startswith('-'):
                    criteria.append(raw[1:].strip())

        # Test descriptions are bounded by the following section header.
        black_m = re.search(r'\*\*Black Box Tests\*\*:\s+(.+?)\n\*\*White Box Tests\*\*:', story_text, re.DOTALL)
        white_m = re.search(r'\*\*White Box Tests\*\*:\s+(.+?)\n\*\*Estimated Tokens\*\*:', story_text, re.DOTALL)

        tokens_m = re.search(r'\*\*Estimated Tokens\*\*:\s+([\d,]+)', story_text)
        model_m = re.search(r'\*\*Model\*\*:\s+(\w+)', story_text)

        return Story(
            story_id=header.group(1),
            title=header.group(2).strip(),
            role=role_m.group(1).strip(),
            need=need_m.group(1).strip(),
            benefit=benefit_m.group(1).strip(),
            acceptance_criteria=criteria,
            black_box_tests=black_m.group(1).strip() if black_m else "",
            white_box_tests=white_m.group(1).strip() if white_m else "",
            # Commas are thousands separators; default to 5000 when absent.
            estimated_tokens=int(tokens_m.group(1).replace(',', '')) if tokens_m else 5000,
            model=model_m.group(1) if model_m else "Kimi",
            prd_name=prd_name,
        )

    except Exception as e:
        print(f"WARNING: Failed to parse story: {e}")
        return None


def load_api_key() -> str:
    """Return the OpenRouter API key from the environment, or exit with help."""
    key = os.getenv('OPENROUTER_API_KEY')
    if not key:
        # No fallback credential source; fail fast with setup instructions.
        print("ERROR: OPENROUTER_API_KEY not found.")
        print("Set it with: export OPENROUTER_API_KEY='your_key'")
        sys.exit(1)
    return key


def create_tasks_from_stories(stories: List[Story], team_filter: Optional[str] = None) -> List[AgentTask]:
    """Build sequentially-numbered AgentTasks, optionally keeping only one team."""
    # Route each story to a team based on its model field, then drop
    # filtered-out stories BEFORE numbering so agent IDs stay contiguous
    # from 1 among the tasks that are actually kept.
    routed = [
        ("MINIMAX" if story.model.lower() == "minimax" else "KIMI", story)
        for story in stories
    ]
    if team_filter:
        wanted = team_filter.upper()
        routed = [(team, story) for team, story in routed if team == wanted]

    return [
        AgentTask(agent_id=n, team=team, story=story)
        for n, (team, story) in enumerate(routed, start=1)
    ]


async def execute_agent(
    session: aiohttp.ClientSession,
    task: AgentTask,
    model_id: str,
    api_key: str
) -> AgentResult:
    """Execute a single agent task via the OpenRouter API.

    Always returns an AgentResult: "success" carries usage stats, cost and
    the model's response text; "fail" carries a short error string (HTTP
    status, timeout, or exception message) with zeroed usage/cost fields.

    Args:
        session: Shared aiohttp session for connection pooling.
        task: The story/agent assignment to execute.
        model_id: OpenRouter model slug to route the request to.
        api_key: OpenRouter bearer token.
    """
    start_time = time.time()

    def _elapsed_ms() -> int:
        # Wall-clock latency for this request, in milliseconds.
        return int((time.time() - start_time) * 1000)

    def _failure(error: str) -> AgentResult:
        # Single place to build a "fail" result instead of repeating all
        # thirteen fields at every error site.
        return AgentResult(
            agent_id=task.agent_id,
            team=task.team,
            model=model_id,
            story_id=task.story.story_id,
            story_title=task.story.title,
            prd_name=task.story.prd_name,
            status="fail",
            prompt_tokens=0,
            completion_tokens=0,
            total_tokens=0,
            cost_usd=0.0,
            response_time_ms=_elapsed_ms(),
            error=error
        )

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "HTTP-Referer": "https://github.com/genesis-system",
        "X-Title": "Genesis Swarm Orchestrator"
    }

    payload = {
        "model": model_id,
        "messages": [
            {"role": "system", "content": task.system_prompt},
            {"role": "user", "content": task.task_description}
        ],
        "temperature": 0.7,
        # Allow headroom above the story's estimate, but cap at 8K tokens.
        "max_tokens": min(task.story.estimated_tokens + 1000, 8192)
    }

    try:
        # Per-request timeout so one stuck request cannot hang its batch.
        timeout = aiohttp.ClientTimeout(total=REQUEST_TIMEOUT_SECONDS)
        async with session.post(OPENROUTER_BASE_URL, json=payload, headers=headers, timeout=timeout) as response:
            response_time_ms = _elapsed_ms()

            if response.status != 200:
                error_text = await response.text()
                return _failure(f"HTTP {response.status}: {error_text[:200]}")

            data = await response.json()

            # Usage stats as reported by OpenRouter.
            usage = data.get('usage', {})
            prompt_tokens = usage.get('prompt_tokens', 0)
            completion_tokens = usage.get('completion_tokens', 0)
            total_tokens = usage.get('total_tokens', 0)

            # Approximate blended pricing per million tokens:
            # MiniMax M2.5 ~$1.00/MTok, Kimi K2.5 ~$1.07/MTok.
            cost_per_mtok = 1.07 if 'kimi' in model_id.lower() else 1.00
            cost_usd = (total_tokens / 1_000_000) * cost_per_mtok

            response_text = data['choices'][0]['message']['content']

            return AgentResult(
                agent_id=task.agent_id,
                team=task.team,
                model=model_id,
                story_id=task.story.story_id,
                story_title=task.story.title,
                prd_name=task.story.prd_name,
                status="success",
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens,
                cost_usd=cost_usd,
                response_time_ms=response_time_ms,
                response=response_text
            )

    except asyncio.TimeoutError:
        return _failure(f"Request timeout after {REQUEST_TIMEOUT_SECONDS}s")
    except Exception as e:
        # Truncate arbitrary exception text to keep JSONL records bounded.
        return _failure(str(e)[:200])


async def execute_swarm(
    tasks: List[AgentTask],
    model_id: str,
    api_key: str,
    batch_size: int
) -> List[AgentResult]:
    """Run every task through the API in concurrent batches of batch_size."""
    results: List[AgentResult] = []
    total_tasks = len(tasks)
    total_batches = (total_tasks + batch_size - 1) // batch_size  # ceil division
    is_kimi = 'kimi' in model_id.lower()

    async with aiohttp.ClientSession() as session:
        for batch_num, start in enumerate(range(0, total_tasks, batch_size), start=1):
            batch = tasks[start:start + batch_size]

            print(f"  Batch {batch_num}/{total_batches}: Processing {len(batch)} stories...")

            try:
                # Stories within a batch run concurrently; the whole batch is
                # bounded by BATCH_TIMEOUT_SECONDS so one hang cannot stall us.
                coros = [execute_agent(session, t, model_id, api_key) for t in batch]
                batch_results = await asyncio.wait_for(
                    asyncio.gather(*coros),
                    timeout=BATCH_TIMEOUT_SECONDS
                )
                results.extend(batch_results)

                ok = sum(1 for r in batch_results if r.status == "success")
                bad = sum(1 for r in batch_results if r.status == "fail")
                print(f"    ✅ {ok} success, ❌ {bad} failed")

                # Kimi is rate-limit sensitive: pause between batches.
                if is_kimi and start + batch_size < total_tasks:
                    print("    ⏸️  Waiting 10 seconds before next batch...")
                    await asyncio.sleep(10)

            except asyncio.TimeoutError:
                print(f"    ❌ Batch {batch_num} timed out after {BATCH_TIMEOUT_SECONDS}s - marking all as failed")
                # Record every story in the timed-out batch as a failure.
                for t in batch:
                    results.append(AgentResult(
                        agent_id=t.agent_id,
                        team=t.team,
                        model=model_id,
                        story_id=t.story.story_id,
                        story_title=t.story.title,
                        prd_name=t.story.prd_name,
                        status="fail",
                        prompt_tokens=0,
                        completion_tokens=0,
                        total_tokens=0,
                        cost_usd=0.0,
                        response_time_ms=BATCH_TIMEOUT_SECONDS * 1000,
                        error=f"Batch timeout after {BATCH_TIMEOUT_SECONDS}s"
                    ))

    return results


def load_completed_story_ids(output_file: str) -> set:
    """Load story IDs already completed successfully from a JSONL file.

    Tolerant of blank and corrupt lines: each line is parsed independently,
    so one bad record no longer discards every record after it (previously a
    single malformed line aborted the whole read).

    Args:
        output_file: Path to a results JSONL file; may not exist yet.

    Returns:
        Set of story_id values whose recorded status is 'success'.
    """
    completed = set()

    if not os.path.exists(output_file):
        return completed

    try:
        with open(output_file, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # skip blank lines (e.g. trailing newline)
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    continue  # skip corrupt records, keep reading the rest
                if data.get('status') == 'success':
                    completed.add(data.get('story_id'))
        print(f"  Found {len(completed)} completed stories in {output_file}")
    except Exception as e:
        # Best-effort read: an unreadable file just means nothing is skipped.
        print(f"  Warning: Could not read existing results: {e}")

    return completed


def save_results(results: List[AgentResult], output_file: str, append: bool = False):
    """Persist results as JSON lines; append when resuming with --skip-completed."""
    # Ensure the results directory exists before writing.
    os.makedirs(os.path.dirname(output_file), exist_ok=True)

    with open(output_file, 'a' if append else 'w') as f:
        f.writelines(json.dumps(asdict(result)) + '\n' for result in results)

    print(f"  Results saved to: {output_file} ({'appended' if append else 'overwritten'})")


def generate_report(minimax_results: List[AgentResult], kimi_results: List[AgentResult]):
    """Generate the comprehensive markdown metrics report and write REPORT_FILE.

    Aggregates success/failure counts, token usage, cost, and latency across
    both teams, broken down per PRD, then prints a console summary.

    Args:
        minimax_results: Results from the MiniMax team (may be empty).
        kimi_results: Results from the Kimi team (may be empty).
    """
    all_results = minimax_results + kimi_results

    # Overall metrics; every ratio is guarded against an empty result set.
    total_agents = len(all_results)
    total_successes = len([r for r in all_results if r.status == "success"])
    total_failures = len([r for r in all_results if r.status == "fail"])
    success_rate = (total_successes / total_agents * 100) if total_agents > 0 else 0

    total_tokens = sum(r.total_tokens for r in all_results)
    total_cost = sum(r.cost_usd for r in all_results)
    avg_response_time = sum(r.response_time_ms for r in all_results) / total_agents if total_agents > 0 else 0
    # BUGFIX: guard against ZeroDivisionError when there are no results
    # (previously computed unguarded inside the report f-string).
    cost_per_story = (total_cost / total_agents) if total_agents > 0 else 0.0

    # Per-PRD breakdown of counts and cost.
    prd_breakdown = {}
    for r in all_results:
        if r.prd_name not in prd_breakdown:
            prd_breakdown[r.prd_name] = {"total": 0, "success": 0, "fail": 0, "cost": 0.0}
        prd_breakdown[r.prd_name]["total"] += 1
        if r.status == "success":
            prd_breakdown[r.prd_name]["success"] += 1
        else:
            prd_breakdown[r.prd_name]["fail"] += 1
        prd_breakdown[r.prd_name]["cost"] += r.cost_usd

    # Team-specific metrics (empty dict when a team ran no stories).
    def team_metrics(team_results):
        if not team_results:
            return {}
        return {
            "total": len(team_results),
            "successes": len([r for r in team_results if r.status == "success"]),
            "failures": len([r for r in team_results if r.status == "fail"]),
            "success_rate": len([r for r in team_results if r.status == "success"]) / len(team_results) * 100,
            "total_tokens": sum(r.total_tokens for r in team_results),
            "total_cost": sum(r.cost_usd for r in team_results),
            "avg_response_time": sum(r.response_time_ms for r in team_results) / len(team_results)
        }

    minimax_metrics = team_metrics(minimax_results)
    kimi_metrics = team_metrics(kimi_results)

    # Generate markdown report
    report = f"""# OpenRouter Swarm Execution Report
**Mission-Driven Edition** - Executing SWARM_MISSIONS.md

**Generated:** {datetime.now().isoformat()}

## Overview

| Metric | Value |
|--------|-------|
| **Total Stories Executed** | {total_agents} |
| **Successful Executions** | {total_successes} ({success_rate:.1f}%) |
| **Failed Executions** | {total_failures} |
| **Total Tokens Used** | {total_tokens:,} |
| **Total Cost** | ${total_cost:.4f} |
| **Average Response Time** | {avg_response_time:.0f}ms |

---

## Team MiniMax (Content & Marketing)

**Model:** `{MINIMAX_MODEL_ID}`

| Metric | Value |
|--------|-------|
| **Total Stories** | {minimax_metrics.get('total', 0)} |
| **Successes** | {minimax_metrics.get('successes', 0)} ({minimax_metrics.get('success_rate', 0):.1f}%) |
| **Failures** | {minimax_metrics.get('failures', 0)} |
| **Total Tokens** | {minimax_metrics.get('total_tokens', 0):,} |
| **Total Cost** | ${minimax_metrics.get('total_cost', 0):.4f} |
| **Avg Response Time** | {minimax_metrics.get('avg_response_time', 0):.0f}ms |

---

## Team Kimi (Code & Architecture)

**Model:** `{KIMI_MODEL_ID}`

| Metric | Value |
|--------|-------|
| **Total Stories** | {kimi_metrics.get('total', 0)} |
| **Successes** | {kimi_metrics.get('successes', 0)} ({kimi_metrics.get('success_rate', 0):.1f}%) |
| **Failures** | {kimi_metrics.get('failures', 0)} |
| **Total Tokens** | {kimi_metrics.get('total_tokens', 0):,} |
| **Total Cost** | ${kimi_metrics.get('total_cost', 0):.4f} |
| **Avg Response Time** | {kimi_metrics.get('avg_response_time', 0):.0f}ms |

---

## PRD Breakdown

| PRD | Total Stories | Successes | Failures | Cost |
|-----|---------------|-----------|----------|------|
"""

    for prd_name, metrics in sorted(prd_breakdown.items()):
        report += f"| {prd_name} | {metrics['total']} | {metrics['success']} | {metrics['fail']} | ${metrics['cost']:.4f} |\n"

    report += f"""
---

## Cost Analysis

| Component | Cost |
|-----------|------|
| **MiniMax Team** | ${minimax_metrics.get('total_cost', 0):.4f} |
| **Kimi Team** | ${kimi_metrics.get('total_cost', 0):.4f} |
| **Total** | ${total_cost:.4f} |

**Cost Per Story:** ${cost_per_story:.4f} (average)

---

## Performance Metrics

### Response Time Distribution

| Team | Avg Response Time | Min | Max |
|------|-------------------|-----|-----|
| **MiniMax** | {minimax_metrics.get('avg_response_time', 0):.0f}ms | {min([r.response_time_ms for r in minimax_results], default=0)}ms | {max([r.response_time_ms for r in minimax_results], default=0)}ms |
| **Kimi** | {kimi_metrics.get('avg_response_time', 0):.0f}ms | {min([r.response_time_ms for r in kimi_results], default=0)}ms | {max([r.response_time_ms for r in kimi_results], default=0)}ms |

---

## Completed Stories

### MiniMax Stories ({len(minimax_results)})
"""

    for r in minimax_results:
        status_icon = "✅" if r.status == "success" else "❌"
        report += f"{status_icon} **{r.story_id}**: {r.story_title}\n"

    report += f"""
### Kimi Stories ({len(kimi_results)})
"""

    for r in kimi_results:
        status_icon = "✅" if r.status == "success" else "❌"
        report += f"{status_icon} **{r.story_id}**: {r.story_title}\n"

    report += """
---

## Failed Stories

"""

    # List each failed story with its recorded error text.
    failed = [r for r in all_results if r.status == "fail"]
    if failed:
        report += f"**Total Failures:** {len(failed)}\n\n"
        for r in failed:
            report += f"- **{r.story_id}** ({r.team}): {r.error}\n"
    else:
        report += "**No failures!** All stories executed successfully.\n"

    report += f"""
---

## Output Files

- **MiniMax Results:** `{MINIMAX_RESULTS_FILE}`
- **Kimi Results:** `{KIMI_RESULTS_FILE}`
- **This Report:** `{REPORT_FILE}`

---

## Next Steps

1. **Review Results:** Read JSONL files for story outputs
2. **Extract Artifacts:** Code, content, configs from successful stories
3. **Integrate Outputs:** Merge generated work into codebase
4. **Fix Failures:** Manually complete failed stories or re-run
5. **Deploy:** Ship Sunaiva Memory Vault (6hr sprint complete!)

---

**Generated by:** Genesis OpenRouter Swarm Orchestrator (Mission-Driven)
**Timestamp:** {datetime.now().isoformat()}
**Source:** `{MISSIONS_FILE}`
"""

    # Save report
    with open(REPORT_FILE, 'w') as f:
        f.write(report)

    # Console summary mirrors the report's headline numbers.
    print(f"\n{'='*80}")
    print("SWARM EXECUTION COMPLETE")
    print(f"{'='*80}")
    print(f"Total Stories: {total_agents}")
    print(f"Successes: {total_successes} ({success_rate:.1f}%)")
    print(f"Failures: {total_failures}")
    print(f"Total Cost: ${total_cost:.4f}")
    print(f"Average Response Time: {avg_response_time:.0f}ms")
    print(f"\nReport saved to: {REPORT_FILE}")
    print(f"{'='*80}\n")


def _load_jsonl_results(path: str) -> List[AgentResult]:
    """Read a results JSONL file back into AgentResult records (empty if absent)."""
    results: List[AgentResult] = []
    if os.path.exists(path):
        with open(path, 'r') as f:
            for line in f:
                results.append(AgentResult(**json.loads(line)))
    return results


async def main():
    """Main orchestrator: parse CLI args, load stories, run both teams, report."""
    parser = argparse.ArgumentParser(description="OpenRouter Swarm Orchestrator - Mission-Driven")
    parser.add_argument(
        '--team',
        choices=['minimax', 'kimi', 'both'],
        default='both',
        help='Which team to run (default: both)'
    )
    parser.add_argument(
        '--limit',
        type=int,
        default=None,
        help='Limit number of stories to execute (default: all)'
    )
    parser.add_argument(
        '--skip-completed',
        action='store_true',
        help='Skip stories already completed in JSONL files (appends new results)'
    )
    args = parser.parse_args()

    # Banner showing the effective runtime configuration.
    print(f"\n{'='*80}")
    print("GENESIS OPENROUTER SWARM ORCHESTRATOR - MISSION-DRIVEN")
    print(f"{'='*80}")
    print(f"Timestamp: {datetime.now().isoformat()}")
    print(f"Missions File: {MISSIONS_FILE}")
    print(f"Team: {args.team.upper()}")
    print(f"MiniMax Batch Size: {BATCH_SIZE_MINIMAX}")
    print(f"Kimi Batch Size: {BATCH_SIZE_KIMI}")
    print(f"Request Timeout: {REQUEST_TIMEOUT_SECONDS}s")
    print(f"Batch Timeout: {BATCH_TIMEOUT_SECONDS}s")
    print(f"Skip Completed: {args.skip_completed}")
    print(f"{'='*80}\n")

    # Load API key (exits if missing).
    api_key = load_api_key()
    print(f"✅ OpenRouter API key loaded\n")

    # With --skip-completed, gather story IDs that already succeeded so only
    # the remainder is executed (results are appended, not overwritten).
    completed_story_ids = set()
    if args.skip_completed:
        print("📋 Loading completed stories...")
        minimax_completed = load_completed_story_ids(MINIMAX_RESULTS_FILE)
        kimi_completed = load_completed_story_ids(KIMI_RESULTS_FILE)
        completed_story_ids = minimax_completed | kimi_completed
        print(f"✅ Found {len(completed_story_ids)} total completed stories\n")

    # Parse missions file
    print("📖 Parsing SWARM_MISSIONS.md...")
    all_stories = parse_missions_file(MISSIONS_FILE)

    if not all_stories:
        print("ERROR: No stories found in missions file")
        sys.exit(1)

    print(f"✅ Found {len(all_stories)} total stories\n")

    # Filter out completed stories
    if args.skip_completed:
        original_count = len(all_stories)
        all_stories = [s for s in all_stories if s.story_id not in completed_story_ids]
        skipped_count = original_count - len(all_stories)
        print(f"⏭️  Skipped {skipped_count} already-completed stories")
        print(f"📝 Remaining: {len(all_stories)} stories to execute\n")

        if len(all_stories) == 0:
            print("✅ All stories already completed! Nothing to do.")
            sys.exit(0)

    # Route stories into tasks; 'both' means no team filter.
    team_filter = None if args.team == 'both' else args.team
    all_tasks = create_tasks_from_stories(all_stories, team_filter)

    # Apply limit if specified
    if args.limit:
        all_tasks = all_tasks[:args.limit]
        print(f"⚠️  Limited to first {args.limit} stories\n")

    minimax_results = []
    kimi_results = []

    # Separate tasks by team
    minimax_tasks = [t for t in all_tasks if t.team == "MINIMAX"]
    kimi_tasks = [t for t in all_tasks if t.team == "KIMI"]

    # Execute MiniMax team
    if minimax_tasks:
        print(f"{'='*80}")
        print(f"TEAM MINIMAX: {len(minimax_tasks)} stories (Content & Marketing)")
        print(f"{'='*80}")
        minimax_results = await execute_swarm(minimax_tasks, MINIMAX_MODEL_ID, api_key, BATCH_SIZE_MINIMAX)
        save_results(minimax_results, MINIMAX_RESULTS_FILE, append=args.skip_completed)
        print()

    # Execute Kimi team
    if kimi_tasks:
        print(f"{'='*80}")
        print(f"TEAM KIMI: {len(kimi_tasks)} stories (Code & Architecture)")
        print(f"{'='*80}")
        kimi_results = await execute_swarm(kimi_tasks, KIMI_MODEL_ID, api_key, BATCH_SIZE_KIMI)
        save_results(kimi_results, KIMI_RESULTS_FILE, append=args.skip_completed)
        print()

    # When resuming, the report must cover old + new results, so re-read the
    # full JSONL files; otherwise report on this run only.
    if args.skip_completed:
        print("📊 Loading complete result set for report generation...")
        generate_report(
            _load_jsonl_results(MINIMAX_RESULTS_FILE),
            _load_jsonl_results(KIMI_RESULTS_FILE)
        )
    else:
        generate_report(minimax_results, kimi_results)


# Script entry point: run the async orchestrator inside a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
