#!/usr/bin/env python3
"""
Genesis Core MCP Server
========================
Exposes Genesis system capabilities via Model Context Protocol.

Tools:
- search_memory: Search Genesis knowledge graph
- store_memory: Store new knowledge
- trigger_workflow: Execute n8n workflows
- execute_skill: Run Genesis skills
- get_status: System status check

Resources:
- genesis://status - Current system state
- genesis://memory/{type} - Memory tier contents
- genesis://skills - Available skills list
- genesis://workflows - Active n8n workflows

Prompts:
- evolution_analysis - System evolution report
- skill_creation - New skill design template
"""

import json
import os
import asyncio
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Optional, Any
from dataclasses import dataclass, field

# MCP SDK imports
try:
    from mcp.server.fastmcp import FastMCP
except ImportError:
    import sys

    print("Installing MCP SDK...")
    # Install into the interpreter actually running this script; a bare
    # "pip" on PATH may belong to a different Python environment.
    subprocess.run([sys.executable, "-m", "pip", "install", "mcp"], check=True)
    from mcp.server.fastmcp import FastMCP

# =============================================================================
# Configuration
# =============================================================================

GENESIS_ROOT = Path("/mnt/e/genesis-system")
MEMORY_FILE = GENESIS_ROOT / "memory" / "knowledge_graph.json"
SKILLS_DIR = GENESIS_ROOT / "skills"
N8N_BASE_URL = os.environ.get("N8N_BASE_URL", "http://localhost:5678")

# =============================================================================
# Server Initialization
# =============================================================================

mcp = FastMCP("genesis-core")

# =============================================================================
# Helper Functions
# =============================================================================

def load_memory_graph() -> dict:
    """Load the Genesis knowledge graph from MEMORY_FILE.

    Returns:
        The parsed graph dict, or an empty skeleton
        ({"entities": [], "relations": []}) when the file does not exist.

    Raises:
        json.JSONDecodeError: if the file exists but contains invalid JSON.
    """
    if MEMORY_FILE.exists():
        # Explicit UTF-8 so behavior does not depend on the platform's
        # default locale encoding (e.g. cp1252 on Windows).
        return json.loads(MEMORY_FILE.read_text(encoding="utf-8"))
    return {"entities": [], "relations": []}


def save_memory_graph(graph: dict) -> None:
    """Persist the Genesis knowledge graph to MEMORY_FILE as pretty JSON.

    Creates the parent directory on first use.

    Args:
        graph: The full knowledge-graph dict to serialize.
    """
    MEMORY_FILE.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8 (and raw non-ASCII output) so the file round-trips
    # identically regardless of the platform's default encoding.
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(graph, f, indent=2, ensure_ascii=False)


def get_available_skills() -> list:
    """List available Genesis skills (Python modules and prompt files)."""
    if not SKILLS_DIR.exists():
        return []

    found = []
    for path in SKILLS_DIR.glob("*.py"):
        if path.name == "__init__.py":
            # Package marker, not a skill.
            continue
        found.append({"name": path.stem, "path": str(path), "type": "python"})
    for path in SKILLS_DIR.glob("*.md"):
        found.append({"name": path.stem, "path": str(path), "type": "prompt"})
    return found


def get_workflow_status() -> list:
    """Return the status of known n8n workflows.

    Placeholder data — a real implementation would query the n8n REST API.
    """
    workflow_states = [
        ("Genesis Discovery Notifier", True),
        ("Genesis Continuous Evolution", True),
        ("Genesis Memory Maintenance", True),
        ("Genesis Lead Capture Pipeline", False),
        ("Genesis Stripe Payment Handler", False),
        ("Genesis Email Drip Sequence", True),
    ]
    return [{"name": name, "active": active} for name, active in workflow_states]


# =============================================================================
# Tools
# =============================================================================

@mcp.tool()
def search_memory(
    query: str,
    entity_type: Optional[str] = None,
    limit: int = 10
) -> str:
    """Search the Genesis knowledge graph for relevant information.

    Args:
        query: Search query (matches entity names and observations)
        entity_type: Optional filter by entity type
        limit: Maximum results to return (default 10)

    Returns:
        JSON string with matching entities and their observations
    """
    needle = query.lower()
    graph = load_memory_graph()
    matches = []

    for entity in graph.get("entities", []):
        # Type filter first: a non-matching type can never be a result.
        if entity_type is not None and entity.get("entityType") != entity_type:
            continue

        in_name = needle in entity.get("name", "").lower()
        in_observations = any(
            needle in obs.lower() for obs in entity.get("observations", [])
        )
        if in_name or in_observations:
            matches.append(entity)
            if len(matches) >= limit:
                break

    return json.dumps(
        {"query": query, "total": len(matches), "results": matches},
        indent=2,
    )


@mcp.tool()
def store_memory(
    name: str,
    entity_type: str,
    observations: list[str]
) -> str:
    """Store a new entity in the Genesis knowledge graph.

    Args:
        name: Entity name (unique identifier)
        entity_type: Type of entity (e.g., 'skill', 'workflow', 'concept')
        observations: List of observations/facts about this entity

    Returns:
        Confirmation with entity details
    """
    graph = load_memory_graph()
    # setdefault guards against a hand-edited graph file missing the key.
    entities = graph.setdefault("entities", [])

    # Check if entity already exists
    existing = next((e for e in entities if e["name"] == name), None)

    if existing:
        # Merge and de-duplicate while PRESERVING insertion order:
        # list(set(...)) would scramble the order non-deterministically
        # between runs; dict.fromkeys keeps first occurrence order.
        merged = existing["observations"] + observations
        existing["observations"] = list(dict.fromkeys(merged))
        existing["updatedAt"] = datetime.now().isoformat()
        action = "updated"
    else:
        # Create new entity
        entities.append({
            "name": name,
            "entityType": entity_type,
            "observations": observations,
            "createdAt": datetime.now().isoformat()
        })
        action = "created"

    save_memory_graph(graph)

    return json.dumps({
        "status": "success",
        "action": action,
        "entity": name,
        "type": entity_type,
        "observations_count": len(observations)
    }, indent=2)


@mcp.tool()
def create_relation(
    from_entity: str,
    relation_type: str,
    to_entity: str
) -> str:
    """Create a relationship between two entities in the knowledge graph.

    Args:
        from_entity: Source entity name
        relation_type: Type of relationship (e.g., 'uses', 'depends_on', 'creates')
        to_entity: Target entity name

    Returns:
        Confirmation of created relation
    """
    graph = load_memory_graph()

    # Both endpoints must already exist; report the first missing one.
    known_names = {entity["name"] for entity in graph.get("entities", [])}
    for endpoint in (from_entity, to_entity):
        if endpoint not in known_names:
            return json.dumps({"error": f"Entity '{endpoint}' not found"})

    graph.setdefault("relations", []).append({
        "from": from_entity,
        "to": to_entity,
        "relationType": relation_type,
        "createdAt": datetime.now().isoformat()
    })
    save_memory_graph(graph)

    return json.dumps({
        "status": "success",
        "relation": f"{from_entity} --[{relation_type}]--> {to_entity}"
    }, indent=2)


@mcp.tool()
def trigger_workflow(
    workflow_name: str,
    input_data: Optional[dict] = None
) -> str:
    """Trigger an n8n workflow by name.

    Args:
        workflow_name: Name of the workflow (used as webhook path)
        input_data: Optional data to pass to the workflow

    Returns:
        Webhook trigger result (JSON string with status/response or error)
    """
    import urllib.request
    import urllib.error

    webhook_url = f"{N8N_BASE_URL}/webhook/{workflow_name}"

    try:
        data = json.dumps(input_data or {}).encode("utf-8")
        req = urllib.request.Request(
            webhook_url,
            data=data,
            headers={"Content-Type": "application/json"},
            method="POST"
        )

        with urllib.request.urlopen(req, timeout=30) as response:
            result = response.read().decode("utf-8")

        # The webhook may legitimately answer with plain text (or an empty
        # body); fall back to the raw string instead of letting
        # json.JSONDecodeError propagate out of the tool.
        try:
            parsed = json.loads(result) if result else None
        except json.JSONDecodeError:
            parsed = result

        return json.dumps({
            "status": "triggered",
            "workflow": workflow_name,
            "response": parsed
        }, indent=2)
    except (urllib.error.URLError, TimeoutError) as e:
        # URLError covers connection failures; TimeoutError covers a read
        # exceeding the 30s timeout (socket.timeout is a TimeoutError alias
        # on Python 3.10+), which URLError alone does not catch.
        return json.dumps({
            "status": "error",
            "workflow": workflow_name,
            "error": str(e)
        }, indent=2)


@mcp.tool()
def execute_skill(
    skill_name: str,
    parameters: Optional[dict] = None
) -> str:
    """Execute a Genesis skill by name.

    Args:
        skill_name: Name of the skill to execute
        parameters: Optional parameters for the skill

    Returns:
        Skill execution result (JSON string; prompt skills return their
        content truncated to 1000 characters)
    """
    skills = get_available_skills()
    skill = next((s for s in skills if s["name"] == skill_name), None)

    if not skill:
        return json.dumps({
            "status": "error",
            "error": f"Skill '{skill_name}' not found",
            "available_skills": [s["name"] for s in skills]
        }, indent=2)

    if skill["type"] == "python":
        # For now, return info about the skill.
        # Full execution would require dynamic loading.
        return json.dumps({
            "status": "info",
            "skill": skill_name,
            "path": skill["path"],
            "message": "Python skill execution requires direct import",
            "parameters": parameters
        }, indent=2)
    elif skill["type"] == "prompt":
        # Read and return prompt content. Explicit UTF-8 so prompt files
        # decode the same way on every platform.
        with open(skill["path"], "r", encoding="utf-8") as f:
            content = f.read()
        return json.dumps({
            "status": "success",
            "skill": skill_name,
            "type": "prompt",
            # Parenthesized for clarity: truncate long prompts to 1000 chars.
            "content": (content[:1000] + "...") if len(content) > 1000 else content
        }, indent=2)

    return json.dumps({"status": "error", "error": "Unknown skill type"})


@mcp.tool()
def get_status() -> str:
    """Get current Genesis system status.

    Returns:
        JSON with system status, memory stats, active workflows
    """
    graph = load_memory_graph()
    skills = get_available_skills()
    workflows = get_workflow_status()

    skill_types = [s["type"] for s in skills]
    status = {
        "status": "operational",
        "timestamp": datetime.now().isoformat(),
        "memory": {
            "entities": len(graph.get("entities", [])),
            "relations": len(graph.get("relations", [])),
        },
        "skills": {
            "total": len(skills),
            "python": skill_types.count("python"),
            "prompts": skill_types.count("prompt"),
        },
        "workflows": {
            "total": len(workflows),
            "active": sum(1 for w in workflows if w["active"]),
        },
        "version": "1.0.0",
    }
    return json.dumps(status, indent=2)


# =============================================================================
# Resources
# =============================================================================

@mcp.resource("genesis://status")
def resource_status() -> str:
    """Current Genesis system status (delegates to the get_status tool)."""
    return get_status()


@mcp.resource("genesis://memory/entities")
def resource_memory_entities() -> str:
    """All entities in the knowledge graph."""
    entities = load_memory_graph().get("entities", [])
    return json.dumps({"count": len(entities), "entities": entities}, indent=2)


@mcp.resource("genesis://memory/relations")
def resource_memory_relations() -> str:
    """All relations in the knowledge graph."""
    relations = load_memory_graph().get("relations", [])
    return json.dumps({"count": len(relations), "relations": relations}, indent=2)


@mcp.resource("genesis://skills")
def resource_skills() -> str:
    """Available Genesis skills."""
    skill_list = get_available_skills()
    payload = {"count": len(skill_list), "skills": skill_list}
    return json.dumps(payload, indent=2)


@mcp.resource("genesis://workflows")
def resource_workflows() -> str:
    """N8N workflow status."""
    workflows = get_workflow_status()
    active_count = sum(1 for w in workflows if w["active"])
    return json.dumps({
        "count": len(workflows),
        "active": active_count,
        "workflows": workflows,
    }, indent=2)


@mcp.resource("genesis://config")
def resource_config() -> str:
    """Genesis configuration."""
    config = {
        "genesis_root": str(GENESIS_ROOT),
        "memory_file": str(MEMORY_FILE),
        "skills_dir": str(SKILLS_DIR),
        "n8n_base_url": N8N_BASE_URL,
        "mcp_version": "1.0.0",
    }
    return json.dumps(config, indent=2)


# =============================================================================
# Prompts
# =============================================================================

@mcp.prompt()
def evolution_analysis(period: str = "weekly") -> str:
    """Generate a Genesis evolution analysis prompt."""
    # Gather all interpolated figures up front.
    entity_count = len(load_memory_graph().get("entities", []))
    skill_count = len(get_available_skills())
    active_workflows = sum(1 for w in get_workflow_status() if w["active"])

    return f"""Analyze the Genesis system's evolution over the past {period}.

Current State:
- Knowledge Graph: {entity_count} entities
- Skills: {skill_count} available
- Workflows: {active_workflows} active

Analyze:
1. New capabilities integrated this {period}
2. Memory growth patterns
3. Workflow efficiency
4. Skill utilization
5. Areas requiring attention

Provide:
- Executive summary
- Key achievements
- Recommendations for next evolution targets
- Risk assessment for proposed changes
"""


@mcp.prompt()
def skill_creation(capability: str, requirements: str = "") -> str:
    """Generate a prompt for creating a new Genesis skill."""
    # Fall back to a placeholder when no requirements were supplied.
    requirements_text = requirements or "No specific requirements provided"

    return f"""Design a new Genesis skill for: {capability}

Requirements:
{requirements_text}

Skill Specification:
1. Name and purpose
2. Input parameters (with types and validation)
3. Output format
4. Dependencies (MCP servers, APIs, libraries)
5. Error handling strategy
6. Testing approach

Implementation:
- Provide Python code using FastMCP decorators
- Include docstrings and type hints
- Add example usage

Integration:
- How this skill connects to existing Genesis infrastructure
- Memory entities it will create/update
- Workflows it may trigger
"""


@mcp.prompt()
def memory_consolidation() -> str:
    """Generate a prompt for memory consolidation analysis."""
    graph = load_memory_graph()

    # Tally entities by type, preserving first-seen order.
    type_counts: dict = {}
    for entity in graph.get("entities", []):
        kind = entity.get("entityType", "unknown")
        type_counts[kind] = type_counts.get(kind, 0) + 1

    type_summary = "\n".join(f"  - {t}: {c}" for t, c in type_counts.items())

    return f"""Analyze Genesis memory for consolidation opportunities.

Current Memory State:
- Total Entities: {len(graph.get('entities', []))}
- Total Relations: {len(graph.get('relations', []))}
- Entity Types:
{type_summary}

Tasks:
1. Identify duplicate or redundant entities
2. Find orphaned entities (no relations)
3. Detect missing relations between related concepts
4. Suggest entity merges or splits
5. Recommend new entity types for better organization

Output:
- Consolidation report
- Specific merge/split recommendations
- Relation additions
- Priority ranking
"""


# =============================================================================
# Main Entry Point
# =============================================================================

if __name__ == "__main__":
    # Run the MCP server using FastMCP's default transport
    # (presumably stdio for local MCP clients — confirm against the
    # installed mcp SDK version).
    mcp.run()
