# swarm_coordinator.py
import asyncio
import json
import os
import psutil
import time
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional, Callable
from dataclasses import dataclass, field, asdict
from enum import Enum
import random
import uuid

class WorkerStatus(Enum):
    """Lifecycle states for a worker agent and for the result it reports."""
    PENDING = "pending"  # created but not yet started
    RUNNING = "running"  # currently executing its task
    SUCCESS = "success"  # finished and produced an output
    FAILED = "failed"    # raised an exception or was reported as failed

@dataclass
class WorkerResult:
    """Outcome reported by a single worker agent after executing a task."""
    worker_id: str                 # id of the worker that produced this result
    worker_type: str               # category label of the worker
    status: WorkerStatus           # terminal status of the execution
    output: Any = None             # payload produced on success
    error: Optional[str] = None    # stringified exception on failure
    duration_ms: float = 0.0       # wall time spent executing
    tokens_used: int = 0           # LLM token accounting (unused placeholder)

    def to_dict(self) -> Dict:
        """Serialize to a plain dict, flattening the status to its string value."""
        return {**asdict(self), "status": self.status.value}

@dataclass
class SwarmState:
    """Shared mutable state for the entire swarm run."""
    swarm_id: str
    task: str
    budget: float     # total budget for the run
    spent: float = 0.0  # budget committed so far
    # BUG FIX: the old default_factory was `datetime.now().isoformat` — that
    # evaluated datetime.now() ONCE at class-definition time and used the bound
    # isoformat method as the factory, so every instance shared the same frozen
    # timestamp. The lambda defers the clock read to instance creation.
    start_time: str = field(default_factory=lambda: datetime.now().isoformat())
    status: str = "running" # "running", "completed", "failed"
    results: List["WorkerResult"] = field(default_factory=list)
    aggregated_result: Optional[Dict] = None

class SwarmCoordinator:
    """
    Coordinates a dynamic swarm of specialized agents.

    Architecture:
    - Queen Core: Central decision hub
    - Guardian Ring: 6-node defensive validation
    - Processing Ring: 10-node operational tier
    - Worker Swarm: Dynamic execution layer
    """

    def __init__(self, task: str, budget: float, workspace_path: str = "genesis-swarm", config_path: str = "swarm_config.json"):
        """
        Create the coordinator and immediately initialize the fixed rings.

        Args:
            task: Top-level task the swarm will execute.
            budget: Total budget for the run (shared out by the Queen Core).
            workspace_path: Workspace directory path (stored; not created here).
            config_path: Path to the JSON file with per-agent configuration.
        """
        self.swarm_id = str(uuid.uuid4())
        self.task = task
        self.budget = budget
        self.workspace = Path(workspace_path)
        self.config_path = Path(config_path)
        self.state = SwarmState(swarm_id=self.swarm_id, task=task, budget=budget)
        self.queen_core = None # Central decision hub, assigned in _setup_infrastructure
        self.guardian_ring = {} # Validation nodes
        self.processing_ring = {} # Operational nodes
        self.worker_swarm = {} # Dynamic workers, registered via add_worker()
        self.agent_config = self._load_agent_config()
        self._setup_infrastructure()

    def _load_agent_config(self) -> Dict:
        """Loads agent configurations from a JSON file; returns {} when unavailable."""
        if not self.config_path.exists():
            print(f"Config file not found: {self.config_path}")
            return {}

        try:
            with open(self.config_path, "r") as f:
                return json.load(f)
        # FIX: narrowed from a blanket `except Exception` — only I/O errors and
        # malformed JSON are expected here; anything else should propagate.
        except (OSError, json.JSONDecodeError) as e:
            print(f"Error loading agent config: {e}")
            return {}

    def _setup_infrastructure(self):
        """Initializes the Queen Core, Guardian Ring, and Processing Ring."""
        # Initialize Queen Core (simplification - can be a specific agent type)
        self.queen_core = QueenCore(coordinator=self)

        # Initialize Guardian Ring (6 nodes)
        for i in range(1, 7):
            agent_id = f"guardian_{i}"
            agent_config = self.agent_config.get(agent_id, {})
            self.guardian_ring[agent_id] = GuardianAgent(agent_id=agent_id, agent_config=agent_config, coordinator=self)

        # Initialize Processing Ring (10 nodes)
        for i in range(1, 11):
            agent_id = f"processor_{i}"
            agent_config = self.agent_config.get(agent_id, {})
            self.processing_ring[agent_id] = ProcessingAgent(agent_id=agent_id, agent_config=agent_config, coordinator=self)

        print("Swarm Infrastructure initialized.")

    async def allocate_task(self, subtask: str, required_capabilities: List[str], budget_allocation: float) -> List[str]:
        """
        Allocates a subtask to suitable agents in the worker swarm.

        Args:
            subtask: The subtask to be executed.
            required_capabilities: List of agent capabilities required for the task.
            budget_allocation: The budget allocated for this subtask.

        Returns:
            A list of worker IDs assigned to the subtask (empty if none qualify).
        """
        # 1. Identify suitable worker agents based on capabilities.
        eligible_workers = [
            worker_id
            for worker_id, worker in self.worker_swarm.items()
            if all(cap in worker.capabilities for cap in required_capabilities)
        ]

        if not eligible_workers:
            print(f"No suitable workers found for subtask: {subtask}")
            return []

        # 2. Assign the subtask to the chosen workers.  For now, just assign to all eligible workers.
        assigned_workers = eligible_workers
        print(f"Assigned subtask '{subtask}' to workers: {assigned_workers}")

        # 3. FIX: record the budget committed to this subtask (was an
        # unimplemented TODO). Spend is tracked but not enforced: going over
        # budget only warns, so existing callers keep working.
        self.state.spent += budget_allocation
        if self.state.spent > self.budget:
            print(f"Warning: allocated spend {self.state.spent} exceeds budget {self.budget}")

        return assigned_workers

    async def execute_subtask(self, subtask: str, assigned_workers: List[str], executor: Callable = None) -> List["WorkerResult"]:
        """
        Executes a subtask concurrently on the assigned worker agents.

        Workers that raise, and workers that disappeared from the swarm after
        allocation (e.g. removed by handle_agent_failure), are recorded as
        FAILED results instead of crashing the whole batch.
        """
        worker_results = []
        active_ids = []
        coros = []
        for worker_id in assigned_workers:
            worker = self.worker_swarm.get(worker_id)
            if worker is None:
                # FIX: was `self.worker_swarm[worker_id]`, which raised KeyError
                # when a worker was removed between allocation and execution.
                worker_results.append(WorkerResult(
                    worker_id=worker_id,
                    worker_type="unknown",
                    status=WorkerStatus.FAILED,
                    error="worker no longer present in swarm"
                ))
                continue
            active_ids.append(worker_id)
            coros.append(worker.execute(subtask, self.state, executor))

        results = await asyncio.gather(*coros, return_exceptions=True)

        # Map each gathered outcome back to the worker that produced it.
        for worker_id, result in zip(active_ids, results):
            if isinstance(result, Exception):
                worker_results.append(WorkerResult(
                    worker_id=worker_id,
                    worker_type="unknown",
                    status=WorkerStatus.FAILED,
                    error=str(result)
                ))
            else:
                worker_results.append(result)

        self.state.results.extend(worker_results)  # Accumulate results
        return worker_results

    async def handle_agent_failure(self, worker_id: str):
        """Handles agent failures by reassigning the task."""
        print(f"Worker {worker_id} failed. Reassigning task...")
        # TODO: Implement task reassignment logic.  For now, just remove the worker.
        if worker_id in self.worker_swarm:
            del self.worker_swarm[worker_id]

    async def aggregate_results(self, results: List["WorkerResult"]) -> Dict[str, Any]:
        """
        Aggregates worker results into a summary dict, stores it on the swarm
        state, and marks the swarm "completed".
        """
        successful = [r for r in results if r.status == WorkerStatus.SUCCESS]
        failed = [r for r in results if r.status == WorkerStatus.FAILED]

        # Calculate metrics
        total_duration = sum(r.duration_ms for r in results)
        success_rate = len(successful) / len(results) if results else 0

        # Synthesize outputs
        # In production: LLM would synthesize these
        synthesized = {
            "worker_outputs": [r.output for r in successful],
            # FIX: guard against non-dict outputs from a custom executor —
            # `r.output.get(...)` raised AttributeError for e.g. plain strings.
            "key_findings": [r.output.get("result", "") for r in successful
                             if isinstance(r.output, dict) and r.output],
        }

        aggregated = {
            "swarm_id": self.swarm_id,
            "task": self.task,
            "synthesis": synthesized,
            "metrics": {
                "total_workers": len(results),
                "successful": len(successful),
                "failed": len(failed),
                "success_rate": success_rate,
                "total_duration_ms": total_duration
            },
            "failed_workers": [{"id": r.worker_id, "error": r.error} for r in failed],
            "completed_at": datetime.now().isoformat()
        }

        self.state.aggregated_result = aggregated
        self.state.status = "completed"

        return aggregated

    async def execute_swarm(self, executor: Callable = None):
        """
        Main execution loop: decompose, allocate, execute, aggregate.

        Args:
            executor: Optional async callable (task, role, shared_state) -> output
                      passed through to each worker; workers simulate work when
                      it is omitted.
        """
        print(f"Starting swarm execution for task: {self.task}")

        # 1. Queen Core analyzes the main task and decomposes it into subtasks.
        subtasks = await self.queen_core.decompose_task(self.task)

        # 2. Iterate through subtasks and allocate them to worker agents.
        all_results = []
        for subtask_data in subtasks:
            subtask = subtask_data["task"]
            required_capabilities = subtask_data["capabilities"]
            budget_allocation = subtask_data["budget"]

            assigned_workers = await self.allocate_task(subtask, required_capabilities, budget_allocation)

            if assigned_workers:
                results = await self.execute_subtask(subtask, assigned_workers, executor)
                all_results.extend(results)

        # 3. Aggregate results from all subtasks (also marks state completed).
        aggregated_results = await self.aggregate_results(all_results)

        # 4. Print final results and update swarm state.
        print("Swarm execution completed.")
        print("Aggregated Results:", aggregated_results)
        self.state.status = "completed"

    def add_worker(self, worker: 'WorkerAgent'):
        """Adds a worker to the worker swarm."""
        self.worker_swarm[worker.worker_id] = worker

    def remove_worker(self, worker_id: str):
        """Removes a worker from the worker swarm."""
        if worker_id in self.worker_swarm:
            del self.worker_swarm[worker_id]

    def get_swarm_status(self) -> Dict:
        """
        Returns the current status of the swarm as a JSON-serializable dict.

        FIX: a plain asdict() left WorkerStatus Enum members inside each
        result, making the snapshot non-JSON-serializable and inconsistent
        with WorkerResult.to_dict(); results now go through to_dict(), which
        flattens status to its string value.
        """
        snapshot = asdict(self.state)
        snapshot["results"] = [r.to_dict() for r in self.state.results]
        return snapshot

class QueenCore:
    """Central decision hub that decomposes the main task."""

    def __init__(self, coordinator: SwarmCoordinator):
        self.coordinator = coordinator

    async def decompose_task(self, task: str) -> List[Dict[str, Any]]:
        """
        Split the main task into three capability-tagged subtasks, each with
        an even share of the total budget.

        In a real implementation, this would use an LLM.
        """
        print(f"Queen Core decomposing task: {task}")
        share = self.coordinator.budget / 3
        # Dummy implementation: one fixed template per pipeline stage.
        stages = [
            ("Research the background of", ["research", "information_gathering"]),
            ("Analyze the key aspects of", ["analysis", "extraction"]),
            ("Summarize the findings about", ["summarization", "synthesis"]),
        ]
        return [
            {"task": f"{prefix} {task}", "capabilities": caps, "budget": share}
            for prefix, caps in stages
        ]

class GuardianAgent:
    """Defensive-ring agent: validates other agents' outputs."""

    def __init__(self, agent_id: str, agent_config: Dict, coordinator: SwarmCoordinator):
        # Guardians carry a fixed capability set: they validate, never produce.
        self.capabilities = ["validation", "security"]
        self.coordinator = coordinator
        self.agent_config = agent_config
        self.agent_id = agent_id

    async def validate_output(self, output: Any) -> bool:
        """
        Check an output produced by another agent.

        Placeholder: real checks (biases, errors, security vulnerabilities)
        are not implemented yet, so every output is accepted.
        """
        print(f"Guardian Agent {self.agent_id} validating output: {output}")
        await asyncio.sleep(0.05)  # simulated validation latency
        return True

class ProcessingAgent:
    """Operational-tier agent: performs processing tasks."""

    def __init__(self, agent_id: str, agent_config: Dict, coordinator: SwarmCoordinator):
        # Processors carry a fixed capability set for operational work.
        self.capabilities = ["processing", "data_manipulation"]
        self.coordinator = coordinator
        self.agent_config = agent_config
        self.agent_id = agent_id

    async def process_data(self, data: Any) -> Any:
        """
        Process data according to this agent's configuration.

        Placeholder: currently a pass-through that returns the input unchanged.
        """
        print(f"Processing Agent {self.agent_id} processing data: {data}")
        await asyncio.sleep(0.05)  # simulated processing latency
        return data

class WorkerAgent:
    """A worker agent that executes a specific role."""

    def __init__(self, worker_id: str, worker_type: str, role: str, capabilities: List[str]):
        """
        Args:
            worker_id: Unique id of this worker within the swarm.
            worker_type: Category label reported in results.
            role: Human-readable role used in the simulated output.
            capabilities: Capability tags the coordinator matches tasks against.
        """
        self.worker_id = worker_id
        self.worker_type = worker_type
        self.role = role
        self.status = WorkerStatus.PENDING
        self.capabilities = capabilities

    async def execute(self, task: str, shared_state: SwarmState, executor: Callable = None) -> "WorkerResult":
        """
        Executes the worker's assigned task and return a WorkerResult.

        Args:
            task: The subtask text to execute.
            shared_state: Swarm-wide state, passed through to a custom executor.
            executor: Optional async callable (task, role, shared_state) -> output.
                      When omitted, a simulated result is produced instead.

        Returns:
            A WorkerResult with SUCCESS and the output, or FAILED and the
            stringified exception; never raises.
        """
        # FIX: time the run with the monotonic perf_counter — the old
        # datetime.now() arithmetic was wall-clock based and could report
        # negative/garbage durations across system clock adjustments.
        start = time.perf_counter()
        self.status = WorkerStatus.RUNNING

        try:
            if executor:
                output = await executor(task, self.role, shared_state)
            else:
                # Default: simulate work (replace with actual LLM call)
                output = {
                    "worker_id": self.worker_id,
                    "role": self.role,
                    "task_received": task,
                    "result": f"Completed {self.role} analysis for task: {task}"
                }
                await asyncio.sleep(0.1)  # Simulate work

        except Exception as e:
            self.status = WorkerStatus.FAILED
            return WorkerResult(
                worker_id=self.worker_id,
                worker_type=self.worker_type,
                status=WorkerStatus.FAILED,
                error=str(e),
                duration_ms=(time.perf_counter() - start) * 1000
            )

        self.status = WorkerStatus.SUCCESS
        return WorkerResult(
            worker_id=self.worker_id,
            worker_type=self.worker_type,
            status=WorkerStatus.SUCCESS,
            output=output,
            duration_ms=(time.perf_counter() - start) * 1000
        )

# Example usage:
async def main():
    """Demo driver: write a throwaway agent config, run one swarm, clean up."""
    # Guardians cycle through three validator roles; processors alternate
    # between aggregator and transformer roles.
    guardian_roles = ["Security Validator", "Bias Detector", "Error Checker"]
    config_data = {
        f"guardian_{i}": {"role": guardian_roles[(i - 1) % 3]} for i in range(1, 7)
    }
    for i in range(1, 11):
        role = "Data Aggregator" if i % 2 else "Data Transformer"
        config_data[f"processor_{i}"] = {"role": role}
    with open("swarm_config.json", "w") as f:
        json.dump(config_data, f)

    coordinator = SwarmCoordinator(
        task="Research and summarize the benefits of renewable energy",
        budget=1000.0,
    )

    # Register the dynamic worker layer.
    roster = [
        ("researcher_1", "researcher", "Information Gathering", ["research", "information_gathering"]),
        ("analyzer_1", "analyzer", "Data Analysis", ["analysis", "extraction"]),
        ("summarizer_1", "summarizer", "Content Synthesis", ["summarization", "synthesis"]),
    ]
    for worker_id, worker_type, role, caps in roster:
        coordinator.add_worker(WorkerAgent(worker_id=worker_id, worker_type=worker_type, role=role, capabilities=caps))

    await coordinator.execute_swarm()

    # Clean up dummy config file
    os.remove("swarm_config.json")

if __name__ == "__main__":
    asyncio.run(main())