#!/usr/bin/env python3
"""
Genesis Meta-Agent
===================
An agent that spawns domain expert agents.

"Meta-agentics help you build elements of the system that builds the system."
- IndyDevDan

Usage:
    python meta_agent.py spawn "code_review"
    python meta_agent.py list-domains
    python meta_agent.py execute "code_review" "Review the blackboard.py file"
"""

import sys
import json
import asyncio
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Any, Optional
from dataclasses import dataclass


@dataclass
class DomainExpert:
    """A domain expert agent backed by on-disk expertise files.

    Attributes:
        domain: Name of the domain this expert covers.
        expertise_path: File holding the expert's mental model
            (``.json`` is parsed; any other format is treated as raw text).
        self_improve_path: Prompt file used to update the expertise
            after significant work (read by external tooling, not here).
    """
    domain: str
    expertise_path: Path
    self_improve_path: Path

    def load_expertise(self) -> Dict:
        """Load the expertise file into a dict.

        Returns:
            Parsed JSON when the file has a ``.json`` suffix, otherwise
            ``{"content": <raw text>}``. An empty dict when the file
            does not exist.
        """
        if not self.expertise_path.exists():
            return {}
        if self.expertise_path.suffix == '.json':
            with open(self.expertise_path, encoding='utf-8') as f:
                return json.load(f)
        # Non-JSON (e.g. YAML) expertise is kept as raw text.
        return {"content": self.expertise_path.read_text(encoding='utf-8')}

    def get_mental_model(self) -> str:
        """Render the expertise as a prompt-ready mental-model string.

        Raw-text expertise is shown verbatim; structured (JSON) expertise
        is pretty-printed. An empty/missing file yields a placeholder.
        (The original code JSON-escaped raw text and had a dead
        ``isinstance`` branch — ``load_expertise`` always returns a dict.)
        """
        expertise = self.load_expertise()
        if "content" in expertise:
            body = expertise["content"]
        elif expertise:
            body = json.dumps(expertise, indent=2)
        else:
            body = "No expertise loaded"
        return f"""
Domain: {self.domain}
Expertise File: {self.expertise_path}

Mental Model:
{body}

Remember: The CODE is always the source of truth. This expertise file is a mental model, not documentation.
"""


class MetaAgent:
    """
    Meta-agent that creates and manages domain experts.

    Implements the meta-agentics pattern:
    - Meta prompts: prompts that write prompts
    - Meta agents: agents that build agents
    - Meta skills: skills that build skills
    """

    DOMAINS = {
        "genesis": "Genesis system self-knowledge and coordination",
        "code_review": "Code review and quality analysis",
        "testing": "Test creation and validation",
        "documentation": "Documentation and comments",
        "coordination": "Multi-agent coordination patterns",
        "learning": "Self-improvement and learning patterns",
        "memory": "Memory systems and persistence",
        "architecture": "System architecture and design"
    }

    def __init__(self, base_path: str = "/mnt/e/genesis-system/expertise"):
        self.base_path = Path(base_path)
        self.experts: Dict[str, DomainExpert] = {}

    def list_domains(self) -> List[str]:
        """List available domains."""
        return list(self.DOMAINS.keys())

    def spawn_expert(self, domain: str) -> DomainExpert:
        """
        Spawn a domain expert agent.

        Args:
            domain: The domain for the expert

        Returns:
            DomainExpert instance
        """
        if domain not in self.DOMAINS:
            # Create new domain
            self.DOMAINS[domain] = f"Expert for {domain}"

        domain_path = self.base_path / domain
        domain_path.mkdir(parents=True, exist_ok=True)

        expert = DomainExpert(
            domain=domain,
            expertise_path=domain_path / "expertise.yaml",
            self_improve_path=domain_path / "self_improve.md"
        )

        self.experts[domain] = expert
        return expert

    async def execute_with_expert(
        self,
        domain: str,
        task: str,
        executor_fn=None
    ) -> Dict[str, Any]:
        """
        Execute a task using a domain expert.

        Workflow:
        1. Spawn expert if not exists
        2. Load expertise (mental model)
        3. Execute task with expertise context
        4. Run self-improve to update expertise
        """
        # Spawn expert
        expert = self.spawn_expert(domain)

        # Load mental model
        mental_model = expert.get_mental_model()

        # Build context
        context = {
            "domain": domain,
            "task": task,
            "mental_model": mental_model,
            "timestamp": datetime.now().isoformat()
        }

        # Execute (with custom executor or default)
        if executor_fn:
            result = await executor_fn(context)
        else:
            # Default: return context (in production, would call LLM)
            result = {
                "status": "executed",
                "domain": domain,
                "task": task,
                "expertise_loaded": True,
                "mental_model_size": len(mental_model)
            }

        # Self-improve step would update expertise here
        # In production: analyze result and update expertise.yaml

        return result

    def create_expert_prompt(self, domain: str) -> str:
        """
        Create a prompt template for a domain expert.

        This is meta-prompting: a prompt that generates prompts.
        """
        description = self.DOMAINS.get(domain, f"Expert for {domain}")

        return f'''# {domain.title()} Expert Agent

## Role
You are a {domain} expert for the Genesis autonomous development system.

## Description
{description}

## Expertise Location
Load your mental model from: expertise/{domain}/expertise.yaml

## Workflow

### Before Task
1. Read expertise file to load mental model
2. Validate understanding against actual codebase
3. Identify relevant patterns and past learnings

### During Task
1. Apply domain expertise
2. Follow established patterns
3. Document decisions and rationale

### After Task
1. Identify what was learned
2. Update expertise file with new patterns
3. Log changes to DEVLOG.md

## Key Principle
> "One executes and forgets, the other executes and learns."

You are an expert that LEARNS. After every action, update your expertise.

## Self-Improve Prompt
Run after significant work: expertise/{domain}/self_improve.md
'''


# CLI Interface
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("""
Genesis Meta-Agent
==================

Commands:
  list-domains              List available domains
  spawn <domain>            Spawn a domain expert
  prompt <domain>           Generate expert prompt template
  execute <domain> <task>   Execute task with domain expert

Examples:
  python meta_agent.py list-domains
  python meta_agent.py spawn code_review
  python meta_agent.py prompt testing
  python meta_agent.py execute documentation "Document the orchestrator"
        """)
        sys.exit(0)

    command = sys.argv[1]
    meta = MetaAgent()

    if command == "list-domains":
        print("Available Domains:")
        for domain, desc in meta.DOMAINS.items():
            print(f"  {domain}: {desc}")

    elif command == "spawn":
        # Specific usage error instead of falling through to "Unknown command".
        if len(sys.argv) < 3:
            print("Usage: spawn <domain>")
            sys.exit(1)
        domain = sys.argv[2]
        expert = meta.spawn_expert(domain)
        print(f"Spawned expert: {domain}")
        print(f"  Expertise: {expert.expertise_path}")
        print(f"  Self-improve: {expert.self_improve_path}")

    elif command == "prompt":
        if len(sys.argv) < 3:
            print("Usage: prompt <domain>")
            sys.exit(1)
        print(meta.create_expert_prompt(sys.argv[2]))

    elif command == "execute":
        if len(sys.argv) < 4:
            print("Usage: execute <domain> <task>")
            sys.exit(1)
        domain = sys.argv[2]
        # Join all remaining args so an unquoted multi-word task isn't truncated.
        task = " ".join(sys.argv[3:])
        result = asyncio.run(meta.execute_with_expert(domain, task))
        print(json.dumps(result, indent=2))

    else:
        print(f"Unknown command: {command}")
        sys.exit(1)
