#!/usr/bin/env python3
"""
CLOUDFLARE DURABLE OBJECTS RESEARCH SWARM
==========================================
Fires parallel Gemini agents to research and develop SubAIVA capabilities
on Cloudflare Workers + Durable Objects.

Each agent gets a specific research mission. Results are written to
Research reports/ and KNOWLEDGE_GRAPH/entities/.

Usage:
    python3 scripts/cloudflare_research_swarm.py
    python3 scripts/cloudflare_research_swarm.py --missions all
    python3 scripts/cloudflare_research_swarm.py --missions 1,3,5
"""

import asyncio
import json
import sys
import os
import time
from pathlib import Path
from datetime import datetime

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from core.gemini_executor import GeminiExecutor

# Research missions for the swarm.
# Each mission dict carries:
#   id          - integer selector matched against the --missions CLI flag
#   name        - used for log lines and as the report's H1 title
#   model       - key looked up in executor.MODELS (the raw value is used as a
#                 fallback when it is not a known alias)
#   prompt      - the full research brief sent to the model
#   output_file - report path relative to the project root
MISSIONS = [
    {
        "id": 1,
        "name": "DO_SQLite_Patterns",
        "model": "flash",
        "prompt": """You are a Cloudflare Durable Objects expert. Research and produce a comprehensive guide on SQLite storage patterns for Durable Objects.

Cover:
1. Schema design patterns for per-user AI agent state
2. How to store conversation history efficiently in DO SQLite
3. Full-text search capabilities in DO SQLite (FTS5 support?)
4. Transaction patterns and concurrency within a single DO
5. Data migration patterns when schema evolves
6. Storage limits and performance characteristics
7. Backup and restore patterns using storage.sql
8. Best practices for indexing within DO SQLite

Output as a structured markdown document with code examples in TypeScript.
Title: "Durable Objects SQLite Patterns for AI Agent State Management"
""",
        "output_file": "Research reports/CF_DO_SQLITE_PATTERNS.md",
    },
    {
        "id": 2,
        "name": "Vectorize_Memory_Architecture",
        "model": "flash",
        "prompt": """You are a vector database architect specializing in Cloudflare Vectorize. Research and produce a comprehensive guide on building a memory system for AI agents using Cloudflare Vectorize.

Cover:
1. Index design: per-customer namespaces vs shared index with metadata filtering
2. Embedding model selection for Workers AI (bge-base-en-v1.5, bge-large-en-v1.5, etc.)
3. Hybrid retrieval: combining Vectorize semantic search with DO SQLite keyword search
4. Memory consolidation patterns: deduplication, importance scoring, decay
5. Batch vs real-time embedding generation
6. Vectorize V2 features and limitations (max dimensions, max vectors, metadata filtering)
7. Cost optimization: when to embed, when to skip
8. Multi-tenant memory isolation patterns

Include TypeScript code examples for each pattern.
Title: "Vectorize Memory Architecture for Multi-Tenant AI Agents"
""",
        "output_file": "Research reports/CF_VECTORIZE_MEMORY_ARCHITECTURE.md",
    },
    {
        "id": 3,
        "name": "Workers_AI_Models",
        "model": "flash",
        "prompt": """You are a Cloudflare Workers AI specialist. Research the current state of Workers AI models available for inference at the edge.

Cover:
1. Full list of text generation models available (Llama 3.x, Qwen, Mistral, etc.)
2. Full list of embedding models available
3. Model performance benchmarks (latency, throughput, quality)
4. Streaming support for chat completions
5. Function calling / tool use support in Workers AI
6. Context window limits per model
7. Rate limits and pricing for Workers AI
8. How to use AI Gateway with Workers AI for caching and analytics
9. Fine-tune deployment support
10. Comparison: Workers AI vs OpenRouter vs direct API for our SubAIVA use case

Output as a structured markdown document.
Title: "Workers AI Model Catalog and SubAIVA Integration Guide"
""",
        "output_file": "Research reports/CF_WORKERS_AI_MODEL_CATALOG.md",
    },
    {
        "id": 4,
        "name": "WebSocket_Hibernation_Patterns",
        "model": "flash",
        "prompt": """You are a real-time systems architect specializing in Cloudflare Durable Objects WebSocket Hibernation API.

Research and produce a comprehensive guide on building real-time AI chat interfaces using DO WebSocket Hibernation.

Cover:
1. WebSocket Hibernation API complete reference (acceptWebSocket, getWebSockets, serializeAttachment, etc.)
2. Connection lifecycle management (connect, message, close, error)
3. Cost model: hibernated vs active DO billing
4. Broadcasting patterns for multi-device sync (same user, multiple tabs)
5. Heartbeat and keepalive strategies
6. Authentication on WebSocket upgrade (token validation)
7. Streaming AI responses over WebSocket (chunked delivery)
8. PartyServer vs raw DO WebSocket comparison for our use case
9. Reconnection patterns (client-side with partysocket)
10. Maximum concurrent connections per DO and scaling strategies

Include TypeScript code examples.
Title: "WebSocket Hibernation Patterns for Real-Time AI Chat"
""",
        "output_file": "Research reports/CF_WEBSOCKET_HIBERNATION_PATTERNS.md",
    },
    {
        "id": 5,
        "name": "Multi_Tenant_Tier_Gating",
        "model": "flash",
        "prompt": """You are a SaaS architecture expert specializing in multi-tenant systems on Cloudflare.

Research and produce a comprehensive guide on implementing tier-based feature gating for a multi-tenant AI agent platform on Cloudflare Workers + Durable Objects.

Our system: SubAIVA — each customer gets their own Durable Object instance. Tiers unlock capabilities:
- Tier 1 ($497/mo): Basic memory, 8B model
- Tier 2 ($997/mo): Advanced memory, 70B model, longer history
- Tier 3 ($1,497/mo): Voice enabled, infinite retention, 70B model
- Queen ($20K+/mo): Full capabilities

Cover:
1. Entitlement storage: KV vs D1 vs DO-internal for tier checks
2. Runtime capability gating patterns in TypeScript
3. Rate limiting per tier (requests/min, tokens/day)
4. Usage metering and billing integration (Stripe webhooks)
5. Upgrade/downgrade patterns (preserving data on downgrade)
6. Feature flag hot-swapping without redeployment
7. Audit logging per customer action
8. Admin dashboard API design
9. Stripe webhook handling in Workers

Include TypeScript code examples.
Title: "Multi-Tenant Tier Gating Architecture for SubAIVA"
""",
        "output_file": "Research reports/CF_MULTI_TENANT_TIER_GATING.md",
    },
    {
        "id": 6,
        "name": "Alarm_Scheduling_Patterns",
        "model": "flash",
        "prompt": """You are a Cloudflare Durable Objects expert specializing in the Alarm API and scheduled task patterns.

Research and produce a comprehensive guide on using DO Alarms for background processing in AI agent systems.

Cover:
1. Alarm API complete reference (setAlarm, getAlarm, deleteAlarm, alarm handler)
2. Memory consolidation scheduler: periodic dedup, importance re-scoring, decay
3. Scheduled report generation per customer
4. Health check and self-healing patterns
5. Retry patterns for failed background tasks
6. Chained alarm patterns (alarm triggers next alarm)
7. Rate limit recovery (schedule retry after rate limit window)
8. Data sync patterns: DO → external database (PostgreSQL) via alarm
9. Maximum alarm frequency and billing implications
10. Cold start behavior when alarm fires on hibernated DO

Include TypeScript code examples.
Title: "Durable Object Alarm Patterns for AI Agent Background Processing"
""",
        "output_file": "Research reports/CF_DO_ALARM_PATTERNS.md",
    },
    {
        "id": 7,
        "name": "Production_Deployment_Security",
        "model": "flash",
        "prompt": """You are a Cloudflare Workers security and deployment expert.

Research and produce a comprehensive guide on production-hardening a Cloudflare Workers + Durable Objects application.

Cover:
1. API authentication patterns (Bearer tokens, JWT, API keys)
2. Rate limiting at the Worker level (not just tier-level)
3. CORS configuration for web clients
4. Custom domain setup with SSL
5. Environment management (staging, production)
6. Secrets management (wrangler secret)
7. Monitoring and alerting (Workers Analytics, Logpush)
8. Error handling and graceful degradation
9. DDoS protection (Cloudflare's built-in + Worker-level)
10. Data encryption at rest in DO SQLite
11. GDPR/AU Privacy Act compliance patterns (data locality, right to erasure)
12. Zero-downtime deployment patterns
13. Rollback strategies

Include TypeScript code examples and wrangler.jsonc configuration.
Title: "Production Security and Deployment Guide for SubAIVA"
""",
        "output_file": "Research reports/CF_PRODUCTION_DEPLOYMENT_SECURITY.md",
    },
]


async def run_mission(executor: GeminiExecutor, mission: dict) -> dict:
    """Execute a single research mission and persist its report.

    BUG FIX: the original awaited nothing — ``executor.execute`` is a
    synchronous call, so every "parallel" mission blocked the event loop and
    the swarm actually ran serially. The call now runs in a worker thread via
    ``asyncio.to_thread`` so concurrent missions genuinely overlap.

    Args:
        executor: Shared GeminiExecutor instance used for the model call.
        mission: One entry from MISSIONS (keys: id, name, model, prompt,
            output_file).

    Returns:
        A summary dict. Always contains "id", "name", "success"; on success
        also "tokens", "cost", "elapsed", "output_file"; on failure "error"
        (and "elapsed" when an exception was raised).
    """
    start = time.time()
    mission_id = mission["id"]
    name = mission["name"]
    # Project root is overridable via GENESIS_ROOT for portability/testing;
    # the default preserves the original hard-coded deployment path.
    root = Path(os.environ.get("GENESIS_ROOT", "/mnt/e/genesis-system"))
    output_file = root / mission["output_file"]

    print(f"[Mission {mission_id}] Starting: {name}")

    try:
        # Off-load the blocking LLM call so asyncio.gather in main() can run
        # missions concurrently up to the semaphore limit.
        result = await asyncio.to_thread(
            executor.execute,
            mission["prompt"],
            model=executor.MODELS.get(mission["model"], mission["model"]),
        )

        if result.success and result.response:
            # Prepend a provenance header, then write the research report.
            output_file.parent.mkdir(parents=True, exist_ok=True)
            header = f"# {name}\n\n"
            header += f"**Generated**: {datetime.now().isoformat()}\n"
            header += f"**Model**: {result.model}\n"
            header += f"**Tokens**: {result.tokens_used}\n"
            header += f"**Cost**: ${result.cost_estimate:.4f}\n\n---\n\n"

            output_file.write_text(header + result.response, encoding="utf-8")

            elapsed = time.time() - start
            print(f"[Mission {mission_id}] COMPLETE: {name} ({elapsed:.1f}s, {result.tokens_used} tokens)")

            return {
                "id": mission_id,
                "name": name,
                "success": True,
                "tokens": result.tokens_used,
                "cost": result.cost_estimate,
                "elapsed": elapsed,
                "output_file": str(output_file),
            }

        print(f"[Mission {mission_id}] FAILED: {name} - {result.error}")
        return {
            "id": mission_id,
            "name": name,
            "success": False,
            "error": result.error,
        }

    except Exception as e:
        # Broad catch is deliberate: one failed mission must not abort the
        # asyncio.gather over the whole swarm.
        elapsed = time.time() - start
        print(f"[Mission {mission_id}] ERROR: {name} - {e}")
        return {
            "id": mission_id,
            "name": name,
            "success": False,
            "error": str(e),
            "elapsed": elapsed,
        }


async def main():
    """Parse CLI flags, fan out the selected research missions, and summarize.

    Flags:
        --missions: "all" (default) or a comma-separated list of mission IDs.
        --parallel: maximum number of missions in flight at once (default 4).

    Returns:
        The list of per-mission result dicts produced by run_mission.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Cloudflare DO Research Swarm")
    parser.add_argument("--missions", default="all", help="Mission IDs to run (e.g., '1,3,5' or 'all')")
    parser.add_argument("--parallel", type=int, default=4, help="Max parallel missions")
    args = parser.parse_args()

    # Select missions; surface IDs that match nothing so typos aren't silent.
    if args.missions == "all":
        selected = MISSIONS
    else:
        ids = {int(x.strip()) for x in args.missions.split(",")}
        selected = [m for m in MISSIONS if m["id"] in ids]
        unknown = ids - {m["id"] for m in selected}
        if unknown:
            print(f"WARNING: unknown mission IDs ignored: {sorted(unknown)}")
    if not selected:
        print("No missions selected; nothing to do.")
        return []

    print("=== CLOUDFLARE RESEARCH SWARM ===")
    print(f"Missions: {len(selected)}")
    print(f"Parallel: {args.parallel}")
    print("Models: Gemini 3 Flash Preview (primary)")
    print(f"Started: {datetime.now().isoformat()}")
    print("================================\n")

    executor = GeminiExecutor(default_model="flash", use_rate_maximizer=True)

    # Cap concurrency: run_mission off-loads blocking work, the semaphore
    # bounds how many missions are in flight simultaneously.
    semaphore = asyncio.Semaphore(args.parallel)

    async def bounded_mission(mission):
        async with semaphore:
            return await run_mission(executor, mission)

    results = await asyncio.gather(*[bounded_mission(m) for m in selected])

    # Summary
    print("\n=== SWARM RESULTS ===")
    total_tokens = 0
    total_cost = 0.0
    successes = 0
    for r in results:
        status = "OK" if r.get("success") else "FAIL"
        tokens = r.get("tokens", 0)
        cost = r.get("cost", 0)
        total_tokens += tokens
        total_cost += cost
        if r.get("success"):
            successes += 1
        print(f"  [{status}] Mission {r['id']}: {r['name']} ({tokens} tokens, ${cost:.4f})")

    print(f"\nTotal: {successes}/{len(results)} succeeded")
    print(f"Total tokens: {total_tokens:,}")
    print(f"Total cost: ${total_cost:.4f}")

    # Append an execution record to the knowledge graph. Root is overridable
    # via GENESIS_ROOT (same convention as run_mission); create the entities
    # directory first — the original open(..., "a") crashed when it was absent.
    root = Path(os.environ.get("GENESIS_ROOT", "/mnt/e/genesis-system"))
    kg_path = root / "KNOWLEDGE_GRAPH/entities/cloudflare_research_swarm_2026_02_26.jsonl"
    kg_path.parent.mkdir(parents=True, exist_ok=True)
    with open(kg_path, "a", encoding="utf-8") as f:
        f.write(json.dumps({
            "id": f"cf_swarm_{int(time.time())}",
            "type": "swarm_execution",
            "name": "Cloudflare DO Research Swarm",
            "description": f"{successes}/{len(results)} missions completed. {total_tokens:,} tokens. ${total_cost:.4f} cost.",
            "created": datetime.now().isoformat(),
            "session": 104,
            "tags": ["cloudflare", "durable-objects", "research", "swarm"],
        }) + "\n")

    return results


if __name__ == "__main__":
    # Script entry point: drive the async swarm with a single event loop.
    asyncio.run(main())