#!/usr/bin/env python3
"""
Genesis Core MCP Server
========================
Exposes Genesis system capabilities via Model Context Protocol.

Tools:
- search_memory: Search Genesis knowledge graph
- store_memory: Store new knowledge
- trigger_workflow: Execute n8n workflows
- execute_skill: Run Genesis skills
- get_status: System status check
- gemini_ctm_flush: Commit Gemini session state to handoff files
- gemini_respawn_signal: Signal that Gemini needs to respawn
- gemini_read_boot_context: Read boot context for new Gemini sessions

Resources:
- genesis://status - Current system state
- genesis://memory/{type} - Memory tier contents
- genesis://skills - Available skills list
- genesis://workflows - Active n8n workflows

Prompts:
- evolution_analysis - System evolution report
- skill_creation - New skill design template
"""

import json
import os
import asyncio
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Optional, Any
from dataclasses import dataclass, field

# MCP SDK imports — self-installing bootstrap if the SDK is missing.
try:
    from mcp.server.fastmcp import FastMCP
except ImportError:
    # Install into the *current* interpreter's environment. A bare "pip" on
    # PATH may belong to a different Python; `sys.executable -m pip` is the
    # documented way to guarantee the install targets this interpreter.
    import sys

    print("Installing MCP SDK...")
    subprocess.run([sys.executable, "-m", "pip", "install", "mcp"], check=True)
    from mcp.server.fastmcp import FastMCP

# Gemini CTM imports
try:
    from gemini_ctm import register_gemini_tools
    GEMINI_CTM_AVAILABLE = True
except ImportError:
    GEMINI_CTM_AVAILABLE = False
    print("Warning: gemini_ctm.py not found - CTM tools will not be available")

# KB RAG Tools imports
try:
    from kb_tools import register_kb_tools
    KB_TOOLS_AVAILABLE = True
except ImportError as _kb_err:
    KB_TOOLS_AVAILABLE = False
    print(f"Warning: kb_tools.py not available — KB MCP tools disabled: {_kb_err}")

# =============================================================================
# Configuration
# =============================================================================

# Root of the Genesis system tree. NOTE(review): hardcoded WSL mount path —
# confirm this is intentional for all deployment targets.
GENESIS_ROOT = Path("/mnt/e/genesis-system")
# Knowledge graph persisted as JSON: {"entities": [...], "relations": [...]}.
MEMORY_FILE = GENESIS_ROOT / "memory" / "knowledge_graph.json"
# Directory scanned for *.py (python) and *.md (prompt) skills.
SKILLS_DIR = GENESIS_ROOT / "skills"
# n8n endpoint; override via the N8N_BASE_URL environment variable.
N8N_BASE_URL = os.environ.get("N8N_BASE_URL", "http://localhost:5678")

# =============================================================================
# Server Initialization
# =============================================================================

# FastMCP server instance — all @mcp.tool/@mcp.resource/@mcp.prompt
# decorators below register against this object.
mcp = FastMCP("genesis-core")

# =============================================================================
# Helper Functions
# =============================================================================

def load_memory_graph() -> dict:
    """Load the Genesis knowledge graph from file.

    Returns:
        The parsed graph dict, or an empty ``{"entities": [], "relations": []}``
        skeleton when MEMORY_FILE does not exist yet.
    """
    if MEMORY_FILE.exists():
        # Explicit UTF-8: JSON is UTF-8 on disk; the platform default
        # encoding is locale-dependent and can corrupt non-ASCII content.
        with open(MEMORY_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    return {"entities": [], "relations": []}


def save_memory_graph(graph: dict) -> None:
    """Save the Genesis knowledge graph to file.

    Creates the parent directory on first use and writes pretty-printed JSON.
    """
    MEMORY_FILE.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8 so the file round-trips with load_memory_graph()
    # regardless of the platform's default locale encoding.
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(graph, f, indent=2)


def get_available_skills() -> list:
    """List available Genesis skills.

    Returns:
        List of dicts with 'name', 'path' and 'type' ('python' or 'prompt').
        Entries are sorted by filename: Path.glob yields files in a
        filesystem-dependent order, which previously made the listing
        (and everything derived from it) nondeterministic.
    """
    skills = []
    if SKILLS_DIR.exists():
        for skill_file in sorted(SKILLS_DIR.glob("*.py")):
            if skill_file.name != "__init__.py":
                skills.append({
                    "name": skill_file.stem,
                    "path": str(skill_file),
                    "type": "python"
                })
        for skill_file in sorted(SKILLS_DIR.glob("*.md")):
            skills.append({
                "name": skill_file.stem,
                "path": str(skill_file),
                "type": "prompt"
            })
    return skills


def get_workflow_status() -> list:
    """Get status of n8n workflows (simplified)."""
    # This would normally call n8n API; for now, return a static snapshot.
    snapshot = [
        ("Genesis Discovery Notifier", True),
        ("Genesis Continuous Evolution", True),
        ("Genesis Memory Maintenance", True),
        ("Genesis Lead Capture Pipeline", False),
        ("Genesis Stripe Payment Handler", False),
        ("Genesis Email Drip Sequence", True),
    ]
    return [{"name": title, "active": is_active} for title, is_active in snapshot]


# =============================================================================
# Tools
# =============================================================================

@mcp.tool()
def search_memory(
    query: str,
    entity_type: Optional[str] = None,
    limit: int = 10
) -> str:
    """Search the Genesis knowledge graph for relevant information.

    Args:
        query: Search query (matches entity names and observations)
        entity_type: Optional filter by entity type
        limit: Maximum results to return (default 10)

    Returns:
        JSON string with matching entities and their observations
    """
    needle = query.lower()
    matches = []

    for entity in load_memory_graph().get("entities", []):
        # Type filter first: non-matching types can never be returned.
        if entity_type is not None and entity.get("entityType") != entity_type:
            continue
        # A hit on either the name or any observation qualifies the entity.
        haystacks = [entity.get("name", "")] + list(entity.get("observations", []))
        if any(needle in text.lower() for text in haystacks):
            matches.append(entity)
            if len(matches) >= limit:
                break

    return json.dumps({
        "query": query,
        "total": len(matches),
        "results": matches
    }, indent=2)


@mcp.tool()
def store_memory(
    name: str,
    entity_type: str,
    observations: list[str]
) -> str:
    """Store a new entity in the Genesis knowledge graph.

    Args:
        name: Entity name (unique identifier)
        entity_type: Type of entity (e.g., 'skill', 'workflow', 'concept')
        observations: List of observations/facts about this entity

    Returns:
        Confirmation with entity details
    """
    graph = load_memory_graph()

    # Check if entity already exists
    existing = next(
        (e for e in graph["entities"] if e["name"] == name),
        None
    )

    if existing:
        # Update existing entity. Deduplicate with dict.fromkeys (insertion-
        # ordered) instead of set(): set() reorders arbitrarily, which made
        # the stored observation order nondeterministic across runs.
        existing["observations"].extend(observations)
        existing["observations"] = list(dict.fromkeys(existing["observations"]))
        existing["updatedAt"] = datetime.now().isoformat()
        action = "updated"
    else:
        # Create new entity
        new_entity = {
            "name": name,
            "entityType": entity_type,
            "observations": observations,
            "createdAt": datetime.now().isoformat()
        }
        graph["entities"].append(new_entity)
        action = "created"

    save_memory_graph(graph)

    return json.dumps({
        "status": "success",
        "action": action,
        "entity": name,
        "type": entity_type,
        # NOTE: count of observations passed in this call, not the entity total.
        "observations_count": len(observations)
    }, indent=2)


@mcp.tool()
def create_relation(
    from_entity: str,
    relation_type: str,
    to_entity: str
) -> str:
    """Create a relationship between two entities in the knowledge graph.

    Args:
        from_entity: Source entity name
        relation_type: Type of relationship (e.g., 'uses', 'depends_on', 'creates')
        to_entity: Target entity name

    Returns:
        Confirmation of created relation
    """
    graph = load_memory_graph()

    # Both endpoints must already exist; check source first, then target.
    known_names = {entity["name"] for entity in graph.get("entities", [])}
    for endpoint in (from_entity, to_entity):
        if endpoint not in known_names:
            return json.dumps({"error": f"Entity '{endpoint}' not found"})

    # Append the new relation, creating the list on first use.
    graph.setdefault("relations", []).append({
        "from": from_entity,
        "to": to_entity,
        "relationType": relation_type,
        "createdAt": datetime.now().isoformat()
    })
    save_memory_graph(graph)

    return json.dumps({
        "status": "success",
        "relation": f"{from_entity} --[{relation_type}]--> {to_entity}"
    }, indent=2)


@mcp.tool()
def trigger_workflow(
    workflow_name: str,
    input_data: Optional[dict] = None
) -> str:
    """Trigger an n8n workflow by name.

    Args:
        workflow_name: Name of the workflow (used as webhook path)
        input_data: Optional data to pass to the workflow

    Returns:
        Webhook trigger result
    """
    import urllib.request
    import urllib.error

    webhook_url = f"{N8N_BASE_URL}/webhook/{workflow_name}"

    try:
        data = json.dumps(input_data or {}).encode("utf-8")
        req = urllib.request.Request(
            webhook_url,
            data=data,
            headers={"Content-Type": "application/json"},
            method="POST"
        )

        with urllib.request.urlopen(req, timeout=30) as response:
            raw = response.read().decode("utf-8")

        # n8n webhooks may answer with plain text or an empty body; only
        # parse when the payload is actually JSON instead of crashing.
        if not raw:
            parsed = None
        else:
            try:
                parsed = json.loads(raw)
            except json.JSONDecodeError:
                parsed = raw

        return json.dumps({
            "status": "triggered",
            "workflow": workflow_name,
            "response": parsed
        }, indent=2)
    except urllib.error.URLError as e:
        return json.dumps({
            "status": "error",
            "workflow": workflow_name,
            "error": str(e)
        }, indent=2)


@mcp.tool()
def execute_skill(
    skill_name: str,
    parameters: Optional[dict] = None
) -> str:
    """Execute a Genesis skill by name.

    Args:
        skill_name: Name of the skill to execute
        parameters: Optional parameters for the skill

    Returns:
        Skill execution result
    """
    skills = get_available_skills()
    skill = next((s for s in skills if s["name"] == skill_name), None)

    if not skill:
        return json.dumps({
            "status": "error",
            "error": f"Skill '{skill_name}' not found",
            "available_skills": [s["name"] for s in skills]
        }, indent=2)

    if skill["type"] == "python":
        # For now, return info about the skill
        # Full execution would require dynamic loading
        return json.dumps({
            "status": "info",
            "skill": skill_name,
            "path": skill["path"],
            "message": "Python skill execution requires direct import",
            "parameters": parameters
        }, indent=2)
    elif skill["type"] == "prompt":
        # Read the prompt content; explicit UTF-8 so non-ASCII prompt text
        # doesn't depend on the platform's default locale encoding.
        with open(skill["path"], "r", encoding="utf-8") as f:
            content = f.read()
        return json.dumps({
            "status": "success",
            "skill": skill_name,
            "type": "prompt",
            # Truncate large prompts to keep the tool response small.
            "content": content[:1000] + "..." if len(content) > 1000 else content
        }, indent=2)

    return json.dumps({"status": "error", "error": "Unknown skill type"})


@mcp.tool()
def get_status() -> str:
    """Get current Genesis system status.

    Returns:
        JSON with system status, memory stats, active workflows
    """
    graph = load_memory_graph()
    skill_list = get_available_skills()
    workflow_list = get_workflow_status()

    # Pre-compute counts so the report literal stays flat and readable.
    python_count = sum(1 for s in skill_list if s["type"] == "python")
    prompt_count = sum(1 for s in skill_list if s["type"] == "prompt")
    active_count = sum(1 for w in workflow_list if w["active"])

    report = {
        "status": "operational",
        "timestamp": datetime.now().isoformat(),
        "memory": {
            "entities": len(graph.get("entities", [])),
            "relations": len(graph.get("relations", []))
        },
        "skills": {
            "total": len(skill_list),
            "python": python_count,
            "prompts": prompt_count
        },
        "workflows": {
            "total": len(workflow_list),
            "active": active_count
        },
        "version": "1.0.0"
    }
    return json.dumps(report, indent=2)


# =============================================================================
# Resources
# =============================================================================

@mcp.resource("genesis://status")
def resource_status() -> str:
    """Current Genesis system status.

    Thin resource wrapper: delegates to the get_status() tool so the same
    JSON report is readable both as a tool call and as a resource.
    """
    return get_status()


@mcp.resource("genesis://memory/entities")
def resource_memory_entities() -> str:
    """All entities in the knowledge graph."""
    # Load once and reuse the list for both the count and the payload.
    entities = load_memory_graph().get("entities", [])
    return json.dumps({"count": len(entities), "entities": entities}, indent=2)


@mcp.resource("genesis://memory/relations")
def resource_memory_relations() -> str:
    """All relations in the knowledge graph."""
    # Load once and reuse the list for both the count and the payload.
    relations = load_memory_graph().get("relations", [])
    return json.dumps({"count": len(relations), "relations": relations}, indent=2)


@mcp.resource("genesis://skills")
def resource_skills() -> str:
    """Available Genesis skills."""
    catalog = get_available_skills()
    return json.dumps({"count": len(catalog), "skills": catalog}, indent=2)


@mcp.resource("genesis://workflows")
def resource_workflows() -> str:
    """N8N workflow status."""
    workflow_list = get_workflow_status()
    active_count = sum(1 for w in workflow_list if w["active"])
    return json.dumps({
        "count": len(workflow_list),
        "active": active_count,
        "workflows": workflow_list
    }, indent=2)


@mcp.resource("genesis://config")
def resource_config() -> str:
    """Genesis configuration."""
    # Expose the effective runtime configuration (paths + n8n endpoint).
    config = {
        "genesis_root": str(GENESIS_ROOT),
        "memory_file": str(MEMORY_FILE),
        "skills_dir": str(SKILLS_DIR),
        "n8n_base_url": N8N_BASE_URL,
        "mcp_version": "1.0.0"
    }
    return json.dumps(config, indent=2)


# =============================================================================
# Prompts
# =============================================================================

@mcp.prompt()
def evolution_analysis(period: str = "weekly") -> str:
    """Generate a Genesis evolution analysis prompt."""
    # Gather the live metrics up front, then interpolate into the template.
    entity_count = len(load_memory_graph().get("entities", []))
    skill_count = len(get_available_skills())
    active_workflows = len([w for w in get_workflow_status() if w['active']])

    return f"""Analyze the Genesis system's evolution over the past {period}.

Current State:
- Knowledge Graph: {entity_count} entities
- Skills: {skill_count} available
- Workflows: {active_workflows} active

Analyze:
1. New capabilities integrated this {period}
2. Memory growth patterns
3. Workflow efficiency
4. Skill utilization
5. Areas requiring attention

Provide:
- Executive summary
- Key achievements
- Recommendations for next evolution targets
- Risk assessment for proposed changes
"""


@mcp.prompt()
def skill_creation(capability: str, requirements: str = "") -> str:
    """Generate a prompt for creating a new Genesis skill."""
    # Fall back to an explicit placeholder when no requirements were given.
    requirements_text = requirements or "No specific requirements provided"

    return f"""Design a new Genesis skill for: {capability}

Requirements:
{requirements_text}

Skill Specification:
1. Name and purpose
2. Input parameters (with types and validation)
3. Output format
4. Dependencies (MCP servers, APIs, libraries)
5. Error handling strategy
6. Testing approach

Implementation:
- Provide Python code using FastMCP decorators
- Include docstrings and type hints
- Add example usage

Integration:
- How this skill connects to existing Genesis infrastructure
- Memory entities it will create/update
- Workflows it may trigger
"""


@mcp.prompt()
def memory_consolidation() -> str:
    """Generate a prompt for memory consolidation analysis."""
    graph = load_memory_graph()

    # Tally entities per type (insertion order preserved, as before).
    type_counts: dict = {}
    for entity in graph.get("entities", []):
        kind = entity.get("entityType", "unknown")
        type_counts[kind] = type_counts.get(kind, 0) + 1

    type_summary = "\n".join(f"  - {t}: {c}" for t, c in type_counts.items())
    entity_total = len(graph.get('entities', []))
    relation_total = len(graph.get('relations', []))

    return f"""Analyze Genesis memory for consolidation opportunities.

Current Memory State:
- Total Entities: {entity_total}
- Total Relations: {relation_total}
- Entity Types:
{type_summary}

Tasks:
1. Identify duplicate or redundant entities
2. Find orphaned entities (no relations)
3. Detect missing relations between related concepts
4. Suggest entity merges or splits
5. Recommend new entity types for better organization

Output:
- Consolidation report
- Specific merge/split recommendations
- Relation additions
- Priority ranking
"""


# =============================================================================
# SubAIVA Knowledge Store — Search 1,600+ ingested documents (FTS5 BM25 scoring)
# =============================================================================

# Cloudflare Worker endpoint serving the knowledge store API.
SUBAIVA_WORKER_URL = "https://subaiva.kinan-ae7.workers.dev"
# NOTE(review): hardcoded fallback bearer token checked into source — fine for
# dev, but production should always set SUBAIVA_API_TOKEN in the environment.
SUBAIVA_API_TOKEN = os.environ.get("SUBAIVA_API_TOKEN", "dev-token-genesis-2026")


@mcp.tool()
def search_subaiva_knowledge(
    query: str,
    category: Optional[str] = None,
    limit: int = 10
) -> str:
    """Search the SubAIVA Knowledge Store (1,600+ docs with FTS5 BM25 scoring).

    Searches AI-generated summaries and key facts from all ingested documents
    including deep think results, platform KBs, KG axioms, research reports,
    and .gemini/knowledge files. Results are ranked by BM25 relevance score.

    Args:
        query: Search query (full-text search with BM25 ranking)
        category: Optional category filter (e.g. 'ghl', 'voice', 'axiom', 'entity', 'genesis')
        limit: Maximum results to return (default 10)

    Returns:
        JSON with matching documents including title, summary, key_facts, source, score
    """
    import urllib.request
    import urllib.error

    # Build the POST body; category is only sent when provided.
    payload = {"query": query, "limit": limit}
    if category:
        payload["category"] = category

    request = urllib.request.Request(
        f"{SUBAIVA_WORKER_URL}/api/swarm/search",
        data=json.dumps(payload).encode("utf-8"),
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {SUBAIVA_API_TOKEN}"
        },
        method="POST"
    )

    try:
        with urllib.request.urlopen(request, timeout=15) as response:
            data = json.loads(response.read().decode("utf-8"))

        hits = data.get("results", [])
        # Project each hit down to the fields callers care about; the worker
        # may use either camelCase or snake_case for key facts.
        trimmed = [
            {
                "title": hit.get("title", ""),
                "summary": hit.get("summary", ""),
                "key_facts": hit.get("keyFacts", hit.get("key_facts", "")),
                "category": hit.get("category", ""),
                "source": hit.get("source", ""),
                "score": hit.get("score", 0),
            }
            for hit in hits
        ]
        return json.dumps({
            "query": query,
            "category": category,
            "total": len(trimmed),
            "results": trimmed
        }, indent=2)

    except urllib.error.URLError as e:
        return json.dumps({
            "error": f"SubAIVA worker unreachable: {e}",
            "hint": "Check if subaiva.kinan-ae7.workers.dev is deployed"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)


@mcp.tool()
def hybrid_search_subaiva_knowledge(
    query: str,
    category: Optional[str] = None,
    limit: int = 10
) -> str:
    """Hybrid search the SubAIVA Knowledge Store — combines BM25 text matching + vector cosine similarity.

    Better for semantic queries like 'how to monetize AI agents' where exact keyword
    match is less important than meaning. Returns bm25Score + vectorScore + hybrid score.

    Args:
        query: Search query (will be matched via both text and vector embedding)
        category: Optional category filter
        limit: Maximum results to return (default 10)

    Returns:
        JSON with matching documents including hybrid score, bm25Score, vectorScore
    """
    import urllib.request
    import urllib.error

    url = f"{SUBAIVA_WORKER_URL}/api/swarm/hybrid-search"
    payload = {"query": query, "limit": limit}
    if category:
        payload["category"] = category
    body = json.dumps(payload).encode("utf-8")

    req = urllib.request.Request(
        url,
        data=body,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {SUBAIVA_API_TOKEN}"
        },
        method="POST"
    )

    try:
        with urllib.request.urlopen(req, timeout=30) as response:
            data = json.loads(response.read().decode("utf-8"))

        results = data.get("results", [])

        return json.dumps({
            "query": query,
            "category": category,
            "total": len(results),
            "results": [
                {
                    "title": r.get("title", ""),
                    "summary": r.get("summary", ""),
                    "key_facts": r.get("keyFacts", r.get("key_facts", "")),
                    "category": r.get("category", ""),
                    "source": r.get("source", ""),
                    "score": r.get("score", 0),
                    "bm25Score": r.get("bm25Score", 0),
                    "vectorScore": r.get("vectorScore", 0),
                }
                for r in results
            ]
        }, indent=2)

    except urllib.error.URLError as e:
        # Consistent with search_subaiva_knowledge: network failures get a
        # targeted message + hint (previously everything fell into the
        # generic branch below with no diagnostic hint).
        return json.dumps({
            "error": f"SubAIVA worker unreachable: {e}",
            "hint": "Check if subaiva.kinan-ae7.workers.dev is deployed"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)


@mcp.tool()
def get_subaiva_knowledge_stats() -> str:
    """Get statistics about the SubAIVA Knowledge Store — document count, categories, tokens, embedding coverage.

    Returns:
        JSON with total documents, token usage, embedding stats, and category breakdown
    """
    import urllib.request
    import urllib.error

    stats_request = urllib.request.Request(
        f"{SUBAIVA_WORKER_URL}/api/swarm/stats",
        headers={"Authorization": f"Bearer {SUBAIVA_API_TOKEN}"},
        method="GET"
    )

    try:
        with urllib.request.urlopen(stats_request, timeout=10) as response:
            raw = response.read().decode("utf-8")
        # Re-serialize with indent=2 for a readable tool response.
        return json.dumps(json.loads(raw), indent=2)
    except urllib.error.URLError as e:
        return json.dumps({"error": f"SubAIVA worker unreachable: {e}"}, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)


# =============================================================================
# Browser Skill Engine MCP Tools
# =============================================================================

try:
    import sys as _sys
    _sys.path.insert(0, "/mnt/e/genesis-system")
    from core.browser_skill_engine import (
        mcp_execute_browser_skill,
        mcp_list_available_skills,
        mcp_learn_browser_skill,
    )
    BROWSER_SKILLS_AVAILABLE = True
except ImportError as _bse_err:
    BROWSER_SKILLS_AVAILABLE = False
    print(f"Warning: browser_skill_engine not available: {_bse_err}")


if BROWSER_SKILLS_AVAILABLE:
    # Browser skill tools are only registered when core.browser_skill_engine
    # imported successfully. json is already imported at module level, so the
    # previous redundant function-local `import json` statements are removed.

    @mcp.tool()
    def execute_browser_skill(skill_id: str, params: str = "{}") -> str:
        """
        Execute a pre-mapped browser automation skill using Gemini vision.

        Args:
            skill_id: The skill identifier (e.g. 'ghl_create_subaccount_api_integration')
            params: JSON string of parameters required by the skill

        Returns:
            JSON string with execution result including success, outputs, and action history
        """
        try:
            # Accept either a JSON string or an already-parsed dict.
            params_dict = json.loads(params) if isinstance(params, str) else params
        except json.JSONDecodeError as e:
            return json.dumps({"success": False, "error": f"Invalid params JSON: {e}"})
        result = mcp_execute_browser_skill(skill_id, params_dict)
        return json.dumps(result, indent=2)

    @mcp.tool()
    def list_available_skills(platform: str = "") -> str:
        """
        List all available browser automation skills.

        Args:
            platform: Optional platform filter (e.g. 'gohighlevel', 'telnyx', 'elestio')

        Returns:
            JSON string with list of skills and their metadata
        """
        # Empty string means "no filter" — the engine expects None for that.
        skills = mcp_list_available_skills(platform or None)
        return json.dumps(skills, indent=2)

    @mcp.tool()
    def learn_browser_skill(platform: str, goal: str) -> str:
        """
        Have Gemini explore a platform UI and map a new browser workflow skill.

        Args:
            platform: Platform name (e.g. 'gohighlevel', 'telnyx', 'godaddy')
            goal: Description of what the skill should accomplish

        Returns:
            JSON string with the new skill definition and file path
        """
        result = mcp_learn_browser_skill(platform, goal)
        return json.dumps(result, indent=2)


# =============================================================================
# Gemini CTM Tools Registration
# =============================================================================

if GEMINI_CTM_AVAILABLE:
    # Register Gemini CTM tools (ctm_flush, respawn_signal, read_boot_context)
    # onto this server; skipped when gemini_ctm.py failed to import above.
    register_gemini_tools(mcp)


# =============================================================================
# KB RAG Tools Registration
# =============================================================================

if KB_TOOLS_AVAILABLE:
    # Register KB tools (search_platform_kb, list_platform_kbs, ingest_platform_kb)
    # onto this server; skipped when kb_tools.py failed to import above.
    register_kb_tools(mcp)


# =============================================================================
# Main Entry Point
# =============================================================================

if __name__ == "__main__":
    # Run the MCP server (FastMCP default transport); blocks until shutdown.
    mcp.run()
