"""
Genesis Voice Bridge MCP Server
================================
Production MCP server exposing Genesis memory systems to Telnyx AI voice assistants.

Supports both SSE and Streamable HTTP transports for Telnyx compatibility.
Uses Starlette middleware for bearer token auth on the MCP endpoint.

Architecture:
    Telnyx AI Assistant -> (SSE/HTTP) -> This Server -> Genesis Memory Systems
                                                          |-> PostgreSQL (Elestio)
                                                          |-> Qdrant (vectors)
                                                          |-> Redis (cache)
                                                          |-> Supermemory (semantic)
                                                          |-> FalkorDB (knowledge graph)
"""

import hashlib
import hmac
import json
import logging
import os
import sys
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Optional

from fastmcp import FastMCP
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.types import ASGIApp, Receive, Scope, Send

# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
# Root logging configuration: INFO-level, timestamped, written to the stream
# handler (stderr by default). Module loggers below inherit this handler.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger("genesis.voice_bridge")

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# Bearer token enforced by BearerTokenMiddleware on /mcp and /sse.
# SECURITY NOTE(review): a production-looking token is hard-coded as the
# fallback — anyone with read access to this file can authenticate whenever
# GENESIS_MCP_AUTH_TOKEN is unset. Prefer failing fast if the env var is
# missing, and rotate this value.
AUTH_TOKEN = os.environ.get(
    "GENESIS_MCP_AUTH_TOKEN",
    "genesis-voice-bridge-2026-production-key",
)
HOST = os.environ.get("GENESIS_MCP_HOST", "0.0.0.0")  # bind address
PORT = int(os.environ.get("GENESIS_MCP_PORT", "8900"))  # listen port
TRANSPORT = os.environ.get("GENESIS_MCP_TRANSPORT", "sse")  # sse or http

# Supermemory configuration
# SECURITY NOTE(review): a live-looking API key is committed in source as the
# default. Rotate it and remove the fallback so the secret lives only in env.
SUPERMEMORY_API_KEY = os.environ.get(
    "SUPERMEMORY_API_KEY",
    "sm_EWRhbQPEodMHkJd8Vbshpx_wCauANQAwJFvFfTwTTrujWzHTQajuJPRJLFwavESILxQZpmDiqfIbDAAfGCffQQb",
)
SUPERMEMORY_CONTAINER = os.environ.get("SUPERMEMORY_CONTAINER_TAG", "genesis-kinan")

# Paths
GENESIS_ROOT = Path(os.environ.get("GENESIS_ROOT", "/mnt/e/genesis-system"))  # repo root (WSL mount default)
MEMORY_MD_PATH = GENESIS_ROOT / "MEMORY.md"  # persistent memory file
HIVE_PROGRESS_DIR = GENESIS_ROOT / "hive" / "progress"  # agent progress reports

# ---------------------------------------------------------------------------
# Elestio Infrastructure Connectors
# ---------------------------------------------------------------------------

# Add data directory to path for elestio_config
# NOTE(review): mutating sys.path at import time is what makes the lazy
# `from elestio_config import ...` statements in the connector helpers below
# resolvable. Import-time side effect — keep before any connector call.
sys.path.insert(0, str(GENESIS_ROOT / "data" / "genesis-memory"))


def _get_pg_connection():
    """Return a psycopg2 connection built from Elestio's PostgresConfig.

    Imports are deferred so the server can start even when the driver or
    the elestio_config module is unavailable. Any failure (import, config,
    or connect) is logged and None is returned instead of raising.
    """
    try:
        import psycopg2
        from elestio_config import PostgresConfig

        params = PostgresConfig.get_connection_params()
        return psycopg2.connect(**params)
    except Exception as exc:
        logger.error("PostgreSQL connection failed: %s", exc)
        return None


def _get_redis_client():
    """Return a verified Redis client from Elestio's RedisConfig, or None.

    The client is pinged immediately so an unreachable server is detected
    here (and logged) rather than on first use.
    """
    try:
        import redis as redis_lib
        from elestio_config import RedisConfig

        conn = redis_lib.Redis(**RedisConfig.get_connection_params())
        conn.ping()  # fail fast if the server is unreachable
        return conn
    except Exception as exc:
        logger.error("Redis connection failed: %s", exc)
        return None


def _get_qdrant_client():
    """Return a QdrantClient for vector search, or None on any failure.

    Uses the url/api_key pair exposed by elestio_config's QdrantConfig;
    errors are logged and swallowed so callers can degrade gracefully.
    """
    try:
        from qdrant_client import QdrantClient
        from elestio_config import QdrantConfig

        cfg = QdrantConfig()
        return QdrantClient(url=cfg.url, api_key=cfg.api_key)
    except Exception as exc:
        logger.error("Qdrant connection failed: %s", exc)
        return None


def _get_falkordb_client():
    """Return a FalkorDB client for knowledge-graph queries, or None.

    Host/port default to the Elestio-hosted instance and can be overridden
    via FALKORDB_HOST / FALKORDB_PORT. Connection problems are logged and
    reported as None rather than raised.
    """
    try:
        from falkordb import FalkorDB

        return FalkorDB(
            host=os.environ.get("FALKORDB_HOST", "152.53.201.221"),
            port=int(os.environ.get("FALKORDB_PORT", "23182")),
            socket_timeout=5,
        )
    except Exception as exc:
        logger.error("FalkorDB connection failed: %s", exc)
        return None


# NOTE(review): a duplicate `query_knowledge_graph` definition previously
# lived here, decorated with `@mcp.tool` *before* `mcp = FastMCP(...)` is
# created further down this file — that raised NameError at import time and
# crashed the server on startup. It also targeted the "genesis_kinan" graph,
# while the canonical implementation defined later in this file queries the
# "genesis" graph (which matches the node/edge counts cited elsewhere in
# this file). The dead early copy has been removed.


# ---------------------------------------------------------------------------
# Redis Response Cache
# ---------------------------------------------------------------------------
# Lazily-created module-level Redis client shared by the cache helpers below.
# None means "not connected yet" (or the last connection attempt failed).
_redis_client = None


def _get_cache():
    """Lazy init Redis cache client.

    Returns the shared Redis client, creating it on first use. A failed
    connection leaves the global as None, so subsequent calls retry the
    connection instead of caching the failure permanently.
    """
    global _redis_client
    if _redis_client is None:
        _redis_client = _get_redis_client()
    return _redis_client


def _cache_get(key: str) -> Optional[str]:
    """Get cached value."""
    try:
        client = _get_cache()
        if client:
            val = client.get(f"genesis:voice_bridge:{key}")
            return val if val else None
    except Exception:
        pass
    return None


def _cache_set(key: str, value: str, ttl: int = 300):
    """Set cached value with TTL in seconds."""
    try:
        client = _get_cache()
        if client:
            client.setex(f"genesis:voice_bridge:{key}", ttl, value)
    except Exception:
        pass


def _cache_key(prefix: str, *args) -> str:
    """Generate a cache key from prefix and arguments."""
    raw = f"{prefix}:" + ":".join(str(a) for a in args)
    return hashlib.md5(raw.encode()).hexdigest()[:16]


# ---------------------------------------------------------------------------
# Bearer Token Auth Middleware
# ---------------------------------------------------------------------------
class BearerTokenMiddleware:
    """
    ASGI middleware that enforces Bearer token auth on /mcp and /sse paths.
    The /health endpoint is left open for monitoring.
    Also injects X-Accel-Buffering: no for SSE to prevent proxy buffering
    (required for Cloudflare, nginx, and similar reverse proxies).

    NOTE(review): non-HTTP scopes (lifespan, websocket) bypass auth entirely.
    That is fine while only SSE/HTTP transports are exposed — revisit if a
    websocket transport is ever added.
    """

    # Paths reachable without a token (monitoring probes, browser favicons).
    OPEN_PATHS = {"/health", "/favicon.ico"}

    def __init__(self, app: ASGIApp, token: str):
        """
        Args:
            app: The wrapped ASGI application.
            token: The expected bearer token (compared in constant time).
        """
        self.app = app
        self.token = token

    async def __call__(self, scope: Scope, receive: Receive, send: Send):
        if scope["type"] == "http":
            path = scope.get("path", "")
            # Allow open paths without auth
            if path in self.OPEN_PATHS:
                await self.app(scope, receive, send)
                return
            # Check Authorization header. dict() collapses duplicate header
            # names; Authorization is expected at most once, so that's safe.
            headers = dict(scope.get("headers", []))
            auth_header = headers.get(b"authorization", b"").decode("utf-8", errors="ignore")
            if not auth_header.startswith("Bearer "):
                response = JSONResponse(
                    {"error": "Missing or invalid Authorization header"},
                    status_code=401,
                )
                await response(scope, receive, send)
                return
            token = auth_header[7:]  # Strip "Bearer "
            # Constant-time comparison (hmac.compare_digest) so response
            # timing cannot leak how much of the token prefix matched —
            # the original `token != self.token` was a timing side channel.
            if not hmac.compare_digest(token, self.token):
                response = JSONResponse(
                    {"error": "Invalid bearer token"},
                    status_code=403,
                )
                await response(scope, receive, send)
                return

            # For SSE endpoints, inject buffering-disable headers so that
            # Cloudflare / nginx / reverse proxies don't buffer the stream.
            if path == "/sse":
                async def send_with_no_buffer(message):
                    if message["type"] == "http.response.start":
                        headers_list = list(message.get("headers", []))
                        headers_list.append((b"x-accel-buffering", b"no"))
                        headers_list.append((b"cache-control", b"no-cache, no-transform"))
                        message = {**message, "headers": headers_list}
                    await send(message)

                await self.app(scope, receive, send_with_no_buffer)
                return

        await self.app(scope, receive, send)


# ---------------------------------------------------------------------------
# FastMCP Server
# ---------------------------------------------------------------------------
# Global FastMCP server instance. It must exist before any @mcp.tool or
# @mcp.custom_route decorator runs, so this definition has to sit above
# every decorated tool/route function in the file.
mcp = FastMCP(
    "Genesis Voice Bridge",
    instructions=(
        "Genesis Voice Bridge provides access to the Genesis AI system's memory, "
        "knowledge graph, project status, and strategic context. Use these tools "
        "to answer questions about Genesis projects, architecture, decisions, "
        "and operational status."
    ),
)


# ---------------------------------------------------------------------------
# Health Check
# ---------------------------------------------------------------------------
@mcp.custom_route("/health", methods=["GET"])
async def health_check(request):
    """Health check endpoint (no auth required).

    Returns a small JSON payload identifying the service plus a UTC
    timestamp. Left open (see BearerTokenMiddleware.OPEN_PATHS) so external
    monitors can probe liveness without credentials.
    """
    return JSONResponse({
        "status": "healthy",
        "service": "genesis-voice-bridge",
        "version": "1.0.0",
        # datetime.utcnow() is deprecated (3.12+) and naive; use an aware UTC
        # timestamp. Note: isoformat() now carries a "+00:00" offset suffix.
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })


# ---------------------------------------------------------------------------
# Tool: get_project_status
# ---------------------------------------------------------------------------
@mcp.tool
def get_project_status(project: str = "all") -> str:
    """
    Get current status of Genesis projects.

    The project data below is a static, hand-maintained snapshot embedded in
    this file (not pulled from a live source), so it reflects the state at
    the time this server was last edited.

    Args:
        project: Project name (receptionistai, clawdtalk, sunaiva, aiva, all)

    Returns:
        Current status summary for the requested project(s): a plain-text
        digest for "all", pretty-printed JSON for one known project, or an
        error string listing valid names otherwise.
    """
    cache_key = _cache_key("project_status", project)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    # Hand-curated registry; keys are the lowercase lookup names accepted
    # via the `project` argument.
    projects = {
        "receptionistai": {
            "name": "ReceptionistAI",
            "status": "Ready to Launch",
            "description": "AI voice agents for Australian businesses",
            "pricing": "$497/mo B2B",
            "tech": "Telnyx TeXML direct (NO VAPI, 65-70% cost savings)",
            "milestone": "AIVA sold plumbing receptionist COLD on 30-min test call",
            "next_steps": [
                "Ship voice demo on website hero",
                "First beta customer (free week for plumber)",
                "Facebook marketing engine",
            ],
        },
        "clawdtalk": {
            "name": "ClawdTalk",
            "status": "Pre-Launch",
            "description": "Personal AI assistant with memory",
            "pricing": "$47-197/mo B2C",
            "potential": "$25K-200K/mo, 8.2/10 defensibility",
            "milestone": "Revenue opportunity report complete",
            "next_steps": ["Ships first among consumer products"],
        },
        "sunaiva": {
            "name": "Sunaiva",
            "status": "MVP Built",
            "description": "AI Memory platform - upload chat exports, extract entities, get MCP server",
            "pricing": "$47-197/mo (Starter/Pro/Sovereign)",
            "tech": "FastAPI + FastMCP + vanilla HTML frontend",
            "milestone": "8/8 E2E tests passing",
            "next_steps": ["Auth/accounts", "Payments", "Hosting", "Admin dashboard"],
        },
        "aiva": {
            "name": "AIVA (Queen)",
            "status": "Phase 3 In Progress - Queen Ascension",
            "description": "AI Queen, lives on Mac Mini M4, Telegram @AivaTel_bot",
            "tech": "OpenClaw v2026.2.6-3 on Mac Mini M4 (192.168.1.57)",
            "voice": "+61 7 3130 4377, Telnyx.NaturalHD.eucalyptus (Australian Female)",
            "skills": "18/59 ready",
            "milestone": "Phase 2 complete - Decision Tracker, Autonomy Assessment, Genesis Sync",
            "next_steps": [
                "Phase 3: NAMP bootstrap, 15-min decision cycles",
                "Phase 4: N8N/SSH dispatch, model routing, revenue pipeline",
            ],
        },
    }

    project_lower = project.lower().strip()
    if project_lower == "all":
        # Voice-friendly digest: name, status, optional pricing, milestone.
        result = "GENESIS PROJECT STATUS\n" + "=" * 40 + "\n\n"
        for p in projects.values():
            result += f"## {p['name']} [{p['status']}]\n"
            result += f"{p['description']}\n"
            if "pricing" in p:
                result += f"Pricing: {p['pricing']}\n"
            result += f"Latest: {p['milestone']}\n\n"
    elif project_lower in projects:
        p = projects[project_lower]
        result = json.dumps(p, indent=2)
    else:
        result = f"Unknown project: {project}. Available: {', '.join(projects.keys())}"

    # Note: unknown-project errors are cached (10 min) just like valid output.
    _cache_set(cache_key, result, ttl=600)
    return result


# ---------------------------------------------------------------------------
# Tool: get_war_room
# ---------------------------------------------------------------------------
@mcp.tool
def get_war_room() -> str:
    """
    Get active war room status: missions, blockers, completed items.

    The war-room data below is a static snapshot embedded in this file at
    edit time (not read from a live source).

    Returns:
        Current war room status including active missions, completed work,
        blockers, and running agents.
    """
    cache_key = _cache_key("war_room")
    cached = _cache_get(cache_key)
    if cached:
        return cached

    # Hand-maintained snapshot of the current mission state.
    war_room = {
        "active_mission": "AIVA Queen Ascension - Phase 3 of 4",
        "completed_this_session": [
            "Unified Memory Hub: core/genesis_memory_hub.py (510 lines) + MCP wrapper",
            "FalkorDB Activated: genesis graph with 952 nodes, 285 edges, 0 errors",
            "AIVA Queen Gap Analysis: Comprehensive report + 4-phase roadmap",
            "IndyDevDan Opus 4.6: 25 axioms, KG entity, 2876-line transcript",
            "ClawdTalk Revenue: $25K-200K/mo potential, 8.2/10 defensibility",
            "Sunaiva MVP: 8/8 E2E tests passing",
            "AIVA model swap, Telnyx voice calling, voice config",
            "Voice Bridge MCP Server (this server)",
        ],
        "running": [
            "Phase 3A: NAMP bootstrap + decision cycles",
            "Phase 3B: Multi-agent + morning briefing",
        ],
        "blockers": [
            "Facebook marketing engine not built yet",
            "Playwright MCP needs apt-get install",
            "NVIDIA NIM keys needed for Kimi K2.5 + MiniMax free access",
        ],
        "phase_status": {
            "phase_1": "DONE - OpenClaw hardened, LanceDB, sessions cleaned",
            "phase_2": "DONE - Decision Tracker, Autonomy Assessment, Genesis Sync",
            "phase_3": "IN PROGRESS - NAMP, decision cycles, multi-agent",
            "phase_4": "QUEUED - N8N/SSH dispatch, model routing, revenue pipeline",
        },
        "key_metrics": {
            "skills_registered": "64 (v2.5.0)",
            "agents": 8,
            "kg_axioms": "434+",
            "kg_entities": "71+",
            "kg_relationships": "125+",
            "alpha_evolve_cycles": 22,
        },
    }

    # Flatten the snapshot into a voice-readable plain-text report.
    result = "GENESIS WAR ROOM STATUS\n" + "=" * 40 + "\n\n"
    result += f"Active Mission: {war_room['active_mission']}\n\n"

    result += "COMPLETED THIS SESSION:\n"
    for item in war_room["completed_this_session"]:
        result += f"  - {item}\n"

    result += "\nCURRENTLY RUNNING:\n"
    for item in war_room["running"]:
        result += f"  - {item}\n"

    result += "\nBLOCKERS:\n"
    for item in war_room["blockers"]:
        result += f"  ! {item}\n"

    result += "\nPHASE STATUS:\n"
    for phase, status in war_room["phase_status"].items():
        result += f"  {phase}: {status}\n"

    result += "\nKEY METRICS:\n"
    for metric, val in war_room["key_metrics"].items():
        result += f"  {metric}: {val}\n"

    # Cached for 5 minutes.
    _cache_set(cache_key, result, ttl=300)
    return result


# ---------------------------------------------------------------------------
# Tool: search_memory
# ---------------------------------------------------------------------------
@mcp.tool
def search_memory(query: str, limit: int = 5) -> str:
    """
    Semantic search across Supermemory for any topic.

    Posts to the Supermemory /v3/search endpoint scoped to the Genesis
    container tag, truncates each hit to 500 characters for voice
    readability, and caches the formatted answer for two minutes.

    Args:
        query: The search query (natural language).
        limit: Maximum number of results (default 5).

    Returns:
        Matching memories from the Genesis Supermemory system.
    """
    import requests as req

    cache_key = _cache_key("search_memory", query, limit)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    try:
        response = req.post(
            "https://api.supermemory.ai/v3/search",
            headers={
                "Authorization": f"Bearer {SUPERMEMORY_API_KEY}",
                "Content-Type": "application/json",
            },
            json={"q": query, "topK": limit, "containerTags": [SUPERMEMORY_CONTAINER]},
            timeout=10,
        )
        response.raise_for_status()
        hits = response.json().get("results", [])
        if not hits:
            # Empty results are deliberately not cached.
            return f"No memories found for query: {query}"

        parts = [f"Found {len(hits)} memories for '{query}':\n\n"]
        for idx, hit in enumerate(hits, 1):
            relevance = hit.get("score", 0)
            stamp = hit.get("createdAt", "unknown")
            if "T" in str(stamp):
                stamp = str(stamp).split("T")[0]  # keep just the date part
            text = " ".join(chunk.get("content", "") for chunk in hit.get("chunks", []))
            # Truncate for voice readability
            if len(text) > 500:
                text = text[:500] + "..."
            parts.append(f"{idx}. [{stamp}] ({relevance*100:.0f}% match)\n{text}\n\n")

        output = "".join(parts)
        _cache_set(cache_key, output, ttl=120)
        return output

    except Exception as e:
        logger.error("Supermemory search failed: %s", e)
        return f"Memory search error: {e}"


# ---------------------------------------------------------------------------
# Tool: query_knowledge_graph
# ---------------------------------------------------------------------------
@mcp.tool
def query_knowledge_graph(query: str, query_type: str = "search") -> str:
    """
    Query the FalkorDB knowledge graph for entities, axioms, and relationships.

    All Cypher runs against the "genesis" graph using parameterized,
    case-insensitive CONTAINS matching; formatted results are cached for
    five minutes.

    Args:
        query: Search term or Cypher query fragment.
        query_type: Type of query - 'search' (entity name search),
                    'axioms' (find axioms), 'relationships' (find connections),
                    'stats' (graph statistics).

    Returns:
        Knowledge graph results formatted for voice delivery.
    """
    cache_key = _cache_key("kg", query, query_type)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    client = _get_falkordb_client()
    if not client:
        return "Knowledge graph unavailable. FalkorDB connection failed."

    try:
        graph = client.select_graph("genesis")

        if query_type == "stats":
            # Two full scans: total node count and total edge count.
            node_result = graph.query("MATCH (n) RETURN count(n) as count")
            edge_result = graph.query("MATCH ()-[r]->() RETURN count(r) as count")
            node_count = node_result.result_set[0][0] if node_result.result_set else 0
            edge_count = edge_result.result_set[0][0] if edge_result.result_set else 0
            output = (
                f"Knowledge Graph Statistics:\n"
                f"  Nodes: {node_count}\n"
                f"  Edges: {edge_count}\n"
                f"  Graph: genesis (FalkorDB on Elestio)"
            )

        elif query_type == "axioms":
            # Assumes axiom nodes carry .type/.content/.source properties —
            # TODO confirm against the graph's actual schema.
            result = graph.query(
                "MATCH (a) WHERE a.type = 'axiom' AND toLower(a.content) CONTAINS toLower($q) "
                "RETURN a.id, a.content, a.source LIMIT 10",
                params={"q": query},
            )
            if not result.result_set:
                output = f"No axioms found matching: {query}"
            else:
                output = f"Axioms matching '{query}':\n\n"
                for row in result.result_set:
                    output += f"  [{row[0]}] {row[1]}\n  Source: {row[2]}\n\n"

        elif query_type == "relationships":
            # Matches edges where either endpoint's name contains the query.
            result = graph.query(
                "MATCH (a)-[r]->(b) WHERE toLower(a.name) CONTAINS toLower($q) "
                "OR toLower(b.name) CONTAINS toLower($q) "
                "RETURN a.name, type(r), b.name LIMIT 15",
                params={"q": query},
            )
            if not result.result_set:
                output = f"No relationships found for: {query}"
            else:
                output = f"Relationships for '{query}':\n\n"
                for row in result.result_set:
                    output += f"  {row[0]} --[{row[1]}]--> {row[2]}\n"

        else:  # search (default): entity-name substring lookup
            result = graph.query(
                "MATCH (n) WHERE toLower(n.name) CONTAINS toLower($q) "
                "RETURN n.name, n.type, labels(n) LIMIT 10",
                params={"q": query},
            )
            if not result.result_set:
                output = f"No entities found matching: {query}"
            else:
                output = f"Entities matching '{query}':\n\n"
                for row in result.result_set:
                    labels = row[2] if row[2] else []
                    output += f"  {row[0]} (type: {row[1]}, labels: {labels})\n"

        _cache_set(cache_key, output, ttl=300)
        return output

    except Exception as e:
        logger.error("Knowledge graph query failed: %s", e)
        return f"Knowledge graph query error: {e}"


# ---------------------------------------------------------------------------
# Tool: get_recent_decisions
# ---------------------------------------------------------------------------
@mcp.tool
def get_recent_decisions(days: int = 7) -> str:
    """
    Pull recent strategic decisions from memory systems.

    Merges two sources: the PostgreSQL `episodes` table (rows whose source
    type or content mentions decision-related keywords, ordered by
    importance then recency) and a Supermemory semantic search. Each entry
    is truncated to 300 characters; the top 10 are formatted for voice
    delivery and the result is cached for three minutes.

    Args:
        days: How many days back to look (default 7).

    Returns:
        Recent strategic decisions made within the Genesis system.
    """
    cache_key = _cache_key("decisions", days)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    decisions = []

    # --- Source 1: PostgreSQL episodes table -------------------------------
    conn = _get_pg_connection()
    if conn:
        try:
            cursor = conn.cursor()
            try:
                # Naive UTC timestamp (equivalent of the deprecated
                # datetime.utcnow()) so the ISO string keeps the same
                # comparison semantics against the created_at column.
                cutoff = (
                    datetime.now(timezone.utc).replace(tzinfo=None)
                    - timedelta(days=days)
                ).isoformat()
                # %% is a literal % under psycopg2's pyformat paramstyle.
                cursor.execute(
                    """
                    SELECT content, source_type, importance_score, created_at
                    FROM episodes
                    WHERE created_at >= %s
                      AND (source_type ILIKE '%%decision%%' OR content ILIKE '%%decision%%'
                           OR content ILIKE '%%decided%%' OR content ILIKE '%%strategic%%')
                    ORDER BY importance_score DESC, created_at DESC
                    LIMIT 15
                    """,
                    (cutoff,),
                )
                for row in cursor.fetchall():
                    decisions.append({
                        "content": row[0][:300],
                        "source": row[1],
                        "importance": float(row[2]) if row[2] else 0,
                        "date": str(row[3])[:10] if row[3] else "unknown",
                    })
            finally:
                cursor.close()  # the original never closed the cursor
        except Exception as e:
            logger.error("PG decision query failed: %s", e)
        finally:
            # Release the connection on every path — the original leaked it
            # when an exception escaped outside its except handler.
            try:
                conn.close()
            except Exception:
                pass

    # --- Source 2: Supermemory semantic search -----------------------------
    try:
        import requests as req
        resp = req.post(
            "https://api.supermemory.ai/v3/search",
            headers={
                "Authorization": f"Bearer {SUPERMEMORY_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "q": "strategic decision directive",
                "topK": 5,
                "containerTags": [SUPERMEMORY_CONTAINER],
            },
            timeout=10,
        )
        if resp.status_code == 200:
            for r in resp.json().get("results", []):
                chunks = r.get("chunks", [])
                content = " ".join(c.get("content", "") for c in chunks)[:300]
                if content:
                    decisions.append({
                        "content": content,
                        "source": "supermemory",
                        "importance": r.get("score", 0),
                        "date": str(r.get("createdAt", ""))[:10],
                    })
    except Exception as e:
        logger.error("Supermemory decision search failed: %s", e)

    # Format the merged list (PG results first, then Supermemory hits).
    if not decisions:
        output = f"No strategic decisions found in the last {days} days."
    else:
        output = f"Recent Strategic Decisions (last {days} days):\n\n"
        for i, d in enumerate(decisions[:10], 1):
            output += f"{i}. [{d['date']}] (importance: {d['importance']:.2f})\n"
            output += f"   {d['content']}\n\n"

    _cache_set(cache_key, output, ttl=180)
    return output


# ---------------------------------------------------------------------------
# Tool: get_agent_status
# ---------------------------------------------------------------------------
@mcp.tool
def get_agent_status() -> str:
    """
    Check what agents are registered and their capabilities.

    The roster below is a static snapshot; a live overlay (count of
    genesis:agent:* keys) is added when Redis is reachable.

    Returns:
        Status of all Genesis agents and their current state.
    """
    cache_key = _cache_key("agent_status")
    cached = _cache_get(cache_key)
    if cached:
        return cached

    # Hand-maintained agent registry.
    agents = {
        "agents": [
            {"name": "research-scout", "role": "Research and exploration", "status": "available"},
            {"name": "builder", "role": "Code implementation", "status": "available"},
            {"name": "tester", "role": "Testing and verification", "status": "available"},
            {"name": "reviewer", "role": "Code review and quality", "status": "available"},
            {"name": "documenter", "role": "Documentation", "status": "available"},
            {"name": "deployer", "role": "Deployment and infrastructure", "status": "available"},
            {"name": "alpha-evolve", "role": "Recursive self-improvement", "status": "available"},
            {"name": "orchestrator", "role": "Task coordination", "status": "available"},
        ],
        "skills_count": 64,
        "skills_version": "v2.5.0",
        "thread_types": [
            "research", "build", "test", "review", "deploy", "evolve", "orchestrate"
        ],
    }

    # Try to get live Redis agent state
    # NOTE(review): KEYS scans the whole keyspace and blocks Redis; consider
    # SCAN if the instance grows large.
    redis_client = _get_cache()
    if redis_client:
        try:
            active = redis_client.keys("genesis:agent:*")
            if active:
                agents["active_redis_agents"] = len(active)
        except Exception:
            pass

    output = "GENESIS AGENT STATUS\n" + "=" * 40 + "\n\n"
    output += f"Total Agents: {len(agents['agents'])}\n"
    output += f"Skills: {agents['skills_count']} ({agents['skills_version']})\n\n"

    for a in agents["agents"]:
        output += f"  {a['name']}: {a['role']} [{a['status']}]\n"

    if "active_redis_agents" in agents:
        output += f"\nActive in Redis: {agents['active_redis_agents']}\n"

    # Cached for 10 minutes.
    _cache_set(cache_key, output, ttl=600)
    return output


# ---------------------------------------------------------------------------
# Tool: get_memory_context
# ---------------------------------------------------------------------------
@mcp.tool
def get_memory_context() -> str:
    """
    Get current MEMORY.md contents - the persistent state of Genesis.

    Candidate paths are tried in priority order and the first readable file
    wins. Files longer than ~4000 characters are condensed: sections whose
    text matches a priority keyword are kept first (up to six), then other
    sections are appended while they still fit the budget.

    Returns:
        The full contents of the Genesis MEMORY.md file,
        which contains the system's persistent memory state.
    """
    cache_key = _cache_key("memory_context")
    cached = _cache_get(cache_key)
    if cached:
        return cached

    # Try the user's private MEMORY.md first, then fallback
    # NOTE(review): the first entry is a hard-coded, per-user WSL path;
    # consider deriving it from $HOME or an env var instead.
    memory_paths = [
        Path("/home/authentic88/.claude/projects/-mnt-e-genesis-system/memory/MEMORY.md"),
        MEMORY_MD_PATH,
        GENESIS_ROOT / "docs" / "MEMORY.md",
    ]

    for path in memory_paths:
        if path.exists():
            try:
                content = path.read_text(encoding="utf-8")
                # Truncate for voice delivery (keep most important sections)
                if len(content) > 4000:
                    # Split on "## " headings; text before the first heading
                    # stays attached to the opening section.
                    sections = []
                    current_section = []
                    for line in content.split("\n"):
                        if line.startswith("## ") and current_section:
                            sections.append("\n".join(current_section))
                            current_section = [line]
                        else:
                            current_section.append(line)
                    if current_section:
                        sections.append("\n".join(current_section))

                    # Priority sections
                    priority_keywords = [
                        "PRIME DIRECTIVE", "WAR ROOM", "COMMAND CENTRE",
                        "AGI SPRINT", "IMMORTAL", "Sunaiva", "AIVA",
                    ]
                    priority_sections = []
                    other_sections = []
                    for s in sections:
                        if any(kw.lower() in s.lower() for kw in priority_keywords):
                            priority_sections.append(s)
                        else:
                            other_sections.append(s)

                    # Keep at most six priority sections, then pad with other
                    # sections that individually fit the remaining budget.
                    # NOTE(review): `remaining` does not account for the
                    # "\n\n" separators, so output can slightly exceed 4000.
                    content = "\n\n".join(priority_sections[:6])
                    remaining = 4000 - len(content)
                    if remaining > 200:
                        for s in other_sections:
                            if len(s) < remaining:
                                content += "\n\n" + s
                                remaining -= len(s)

                _cache_set(cache_key, content, ttl=300)
                return content
            except Exception as e:
                # Unreadable candidate: log and fall through to the next path.
                logger.error("Failed to read %s: %s", path, e)

    return "MEMORY.md not found. The Genesis persistent state file is unavailable."


# ---------------------------------------------------------------------------
# Tool: get_session_progress
# ---------------------------------------------------------------------------
@mcp.tool
def get_session_progress() -> str:
    """
    Get active session progress from the hive/progress directory.

    Reads the five most recently modified markdown reports, truncating
    each to 1000 characters, and caches the combined output briefly.

    Returns:
        Latest session progress reports from dispatched agents.
    """
    key = _cache_key("session_progress")
    hit = _cache_get(key)
    if hit:
        return hit

    if not HIVE_PROGRESS_DIR.exists():
        return "No hive/progress directory found."

    reports = sorted(
        HIVE_PROGRESS_DIR.glob("*.md"),
        key=lambda p: p.stat().st_mtime,
        reverse=True,
    )
    if not reports:
        return "No progress reports found in hive/progress/."

    parts = ["SESSION PROGRESS REPORTS\n" + "=" * 40 + "\n\n"]
    for report in reports[:5]:
        try:
            body = report.read_text(encoding="utf-8")
            if len(body) > 1000:
                body = body[:1000] + "\n... (truncated)"
            parts.append(f"--- {report.name} ---\n{body}\n\n")
        except Exception as exc:
            parts.append(f"--- {report.name} --- Error: {exc}\n\n")
    result = "".join(parts)

    _cache_set(key, result, ttl=120)
    return result


# ---------------------------------------------------------------------------
# Tool: get_architecture_summary
# ---------------------------------------------------------------------------
@mcp.tool
def get_architecture_summary() -> str:
    """
    Get key architecture decisions and patterns used in Genesis.

    The summary text is static; it is cached for 15 minutes purely so the
    cache layer records access patterns consistently with the other tools.

    Returns:
        Summary of Genesis architecture, infrastructure, and design patterns.
    """
    cache_key = _cache_key("architecture")
    if (cached := _cache_get(cache_key)):
        return cached

    text = """GENESIS ARCHITECTURE SUMMARY
========================================

## Core Infrastructure (Elestio Cloud)
- PostgreSQL: Episodic memory, entity state, relational data
- Qdrant: Vector embeddings, semantic search (1536-dim, Cosine)
- Redis: Caching, working memory, agent coordination queues
- FalkorDB: Knowledge graph (952 nodes, 285 edges)
- Supermemory: External semantic memory API
- n8n: Workflow orchestration (541 integrations)

## Voice Architecture
- Telnyx TeXML: Direct voice integration (NO VAPI - 65-70% cost savings)
- AIVA line: +61 7 3130 4377 (Gemini 2.5 Flash, eucalyptus female voice)
- Claude line: +61 7 3130 4226 (Haiku 4.5, marlu male voice)
- Cost: $0.025-0.035/min (Telnyx direct)

## AI Architecture
- Orchestrator: Claude Opus 4.6 (200K/1M context)
- Execution: Gemini swarm via Rate Maximizer (90-95% capacity)
- AIVA: OpenClaw on Mac Mini M4, GLM-4.6V-Flash-9B local
- Cloud models via OpenRouter: Claude Sonnet 4, Gemini 2.5 Flash

## Key Patterns
- Multi-Agent: TeamCreate -> TaskCreate -> spawn -> parallel -> shutdown -> delete
- Three Pillars: Orchestration + Observability + Sandboxes (ALL COMPLETE)
- Core Four: Context + Model + Prompt + Tools
- Alpha Evolve: INGEST -> EXTRACT -> SYNTHESIZE -> HARDCODE -> VERIFY -> EVOLVE
- Memory Moat: 5-system unified memory (PG, Qdrant, Redis, Supermemory, FalkorDB)

## 3 Prime Directives
1. MEMORY - Remember everything. Memory is our moat.
2. EVOLUTION - Improve perpetually. Recursive self-improvement.
3. REVENUE - Generate value. Ship products, acquire customers.

## Cost Structure
- Elestio infra: ~$92/mo
- Telnyx voice: $0.025-0.035/min
- Model costs: Haiku $1/MTok, Sonnet $9/MTok, Opus $15/MTok
"""

    _cache_set(cache_key, text, ttl=900)
    return text


# ---------------------------------------------------------------------------
# Tool: get_revenue_status
# ---------------------------------------------------------------------------
@mcp.tool
def get_revenue_status() -> str:
    """
    Get current revenue targets, pipeline, and financial status.

    The report text is static; caching mirrors the other static tools.

    Returns:
        Revenue targets, cost structure, and financial metrics.
    """
    cache_key = _cache_key("revenue")
    if (cached := _cache_get(cache_key)):
        return cached

    report = """GENESIS REVENUE STATUS
========================================

## Revenue Targets
- Break-even: 1 customer at $47/mo
- Month 3 target: $2-5K MRR
- Month 7 target: $10K MRR
- Memory moat: 2.17x revenue retention vs traditional SaaS

## Products & Pricing
1. ReceptionistAI: $497/mo B2B (AI voice agents for trades)
2. ClawdTalk: $47-197/mo B2C (personal AI with memory)
3. Sunaiva: $47-197/mo (AI memory platform)

## Cost Structure
- Elestio infrastructure: ~$92/mo
- Telnyx voice: $0.025-0.035/min
- Production hardening: $2/mo additional
- Model costs: Haiku $1/MTok, Sonnet $9/MTok, Opus $15/MTok

## Revenue Pipeline Status
- ReceptionistAI: READY TO LAUNCH (first beta customer next)
- ClawdTalk: Ships first among consumer products
- Sunaiva: MVP done, needs auth + hosting for launch

## Key Revenue Insights
- AIVA successfully sold plumbing receptionist on cold 30-min call
- Memory moat is primary competitive advantage
- Facebook marketing engine is biggest gap to revenue
"""

    _cache_set(cache_key, report, ttl=600)
    return report


# ---------------------------------------------------------------------------
# Tool: memory_recent — last 50 KG entities from JSONL files
# ---------------------------------------------------------------------------
@mcp.tool
def memory_recent(limit: int = 50) -> str:
    """
    Get the most recent KG entities from Genesis knowledge graph files.

    Scans the 20 most recently modified *.jsonl files under
    KNOWLEDGE_GRAPH/entities, newest first, collecting up to ``limit``
    parseable JSON lines. Corrupt/partial lines are skipped silently.

    Args:
        limit: Maximum number of entities to return (default 50, max 100).

    Returns:
        Plain-text formatted list of recent KG entities, newest files first.
    """
    # Clamp BEFORE building the cache key; previously the raw value was
    # hashed, so e.g. limit=500 and limit=100 produced identical output
    # but occupied two separate cache entries.
    limit = min(max(1, limit), 100)
    cache_key = _cache_key("memory_recent", limit)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    kg_entities_dir = GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "entities"
    if not kg_entities_dir.exists():
        return "Knowledge graph entities directory not found."

    entities = []
    try:
        # Newest files first so the freshest entities win the budget.
        jsonl_files = sorted(
            kg_entities_dir.glob("*.jsonl"),
            key=lambda f: f.stat().st_mtime,
            reverse=True,
        )

        for jsonl_file in jsonl_files[:20]:
            try:
                with open(jsonl_file, "r", encoding="utf-8") as f:
                    for line in f:
                        line = line.strip()
                        if not line:
                            continue
                        try:
                            entity = json.loads(line)
                        except json.JSONDecodeError:
                            continue  # tolerate truncated/corrupt JSONL lines
                        entity["_source_file"] = jsonl_file.name
                        entities.append(entity)
                        if len(entities) >= limit:
                            break
            except Exception as e:
                logger.warning("Failed to read KG file %s: %s", jsonl_file.name, e)
            if len(entities) >= limit:
                break

    except Exception as e:
        return f"Error reading KG entities: {e}"

    if not entities:
        return "No KG entities found."

    # Format for readability
    output = f"RECENT KG ENTITIES (last {len(entities)})\n" + "=" * 40 + "\n\n"
    for i, ent in enumerate(entities, 1):
        entity_id = ent.get("id") or ent.get("entity_id") or f"entity_{i}"
        pattern_type = ent.get("type") or ent.get("pattern_type") or "unknown"
        # First non-empty of these fields becomes the display text; fall
        # back to the repr of the whole entity, capped at 200 chars.
        content_fields = ["content", "what", "learning", "description", "axiom"]
        content = next(
            (str(ent[fld])[:200] for fld in content_fields if ent.get(fld)),
            str(ent)[:200],
        )
        source = ent.get("_source_file", "")
        output += f"{i}. [{pattern_type}] {entity_id}\n   {content}\n   src: {source}\n\n"

    _cache_set(cache_key, output, ttl=60)
    return output


# ---------------------------------------------------------------------------
# Tool: memory_search — semantic search via Qdrant
# ---------------------------------------------------------------------------
@mcp.tool
def memory_search(query: str, limit: int = 10) -> str:
    """
    Semantic search across the Genesis Qdrant vector store.

    Searches the genesis_vectors and genesis_memories collections for
    content semantically similar to the query.

    Args:
        query: Natural language search query.
        limit: Maximum results to return (default 10, max 20).

    Returns:
        Ranked search results from Qdrant vector store.
    """
    cache_key = _cache_key("memory_search_qdrant", query, limit)
    cached = _cache_get(cache_key)
    if cached:
        return cached

    limit = min(max(1, limit), 20)
    client = _get_qdrant_client()
    if not client:
        # Fall back to Supermemory if Qdrant unavailable
        return search_memory(query, limit)

    try:
        from elestio_config import QdrantConfig
        cfg = QdrantConfig()

        # Check available collections
        collections_resp = client.get_collections()
        collection_names = [c.name for c in collections_resp.collections]

        if not collection_names:
            return f"No Qdrant collections available. Use search_memory for Supermemory search."

        # Try to get embedding for semantic search
        query_vector = None
        try:
            import os
            import requests as req
            api_key = os.environ.get("OPENAI_API_KEY", "")
            if api_key:
                resp = req.post(
                    "https://api.openai.com/v1/embeddings",
                    headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
                    json={"model": "text-embedding-3-small", "input": query},
                    timeout=10,
                )
                if resp.status_code == 200:
                    query_vector = resp.json()["data"][0]["embedding"]
        except Exception as embed_err:
            logger.debug("Embedding fetch failed: %s", embed_err)

        results_output = f"Memory search results for: '{query}'\n" + "=" * 40 + "\n\n"
        found_any = False

        for col_name in collection_names[:3]:
            try:
                if query_vector:
                    hits = client.search(
                        collection_name=col_name,
                        query_vector=query_vector,
                        limit=limit,
                        with_payload=True,
                        score_threshold=0.4,
                    )
                else:
                    # Scroll fallback (no embedding)
                    scroll_result = client.scroll(
                        collection_name=col_name,
                        limit=limit,
                        with_payload=True,
                    )
                    hits = scroll_result[0] if scroll_result else []

                if hits:
                    found_any = True
                    results_output += f"Collection: {col_name} ({len(hits)} results)\n\n"
                    for i, hit in enumerate(hits, 1):
                        score = getattr(hit, "score", 0.0)
                        payload = hit.payload or {}
                        text = payload.get("text", "") or payload.get("content", "")
                        source = payload.get("source_file", payload.get("source", ""))
                        category = payload.get("category", payload.get("pattern_type", ""))
                        if len(text) > 300:
                            text = text[:300] + "..."
                        results_output += f"{i}. [{category}] (score: {score:.3f})\n"
                        if source:
                            results_output += f"   Source: {source}\n"
                        results_output += f"   {text}\n\n"

            except Exception as col_err:
                logger.debug("Search in collection %s failed: %s", col_name, col_err)

        if not found_any:
            results_output += "No matching results found in Qdrant.\n"
            results_output += "Tip: Ingest documents with memory_auto_ingest.py to populate search index."

        _cache_set(cache_key, results_output, ttl=120)
        return results_output

    except Exception as e:
        logger.error("memory_search failed: %s", e)
        return f"Memory search error: {e}"


# ---------------------------------------------------------------------------
# Tool: memory_stats — aggregate statistics about the memory system
# ---------------------------------------------------------------------------
@mcp.tool
def memory_stats() -> str:
    """
    Get comprehensive statistics about the Genesis memory infrastructure.

    Collects per-system stats (PostgreSQL table counts / DB size, Qdrant
    vector counts, Redis key counts / memory, KG JSONL file counts, and
    ingest-pipeline log status). Each backend section degrades to an
    "error"/"unreachable" status independently, so one failed system never
    hides the others.

    Returns:
        Formatted statistics for all memory systems.
    """
    cache_key = _cache_key("memory_stats")
    cached = _cache_get(cache_key)
    if cached:
        return cached

    # Local import: the module top only pulls datetime/timedelta.
    from datetime import timezone

    stats = {
        # Same wire format as the old datetime.utcnow().isoformat() + "Z",
        # but without utcnow(), which is deprecated since Python 3.12.
        "timestamp": datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z",
        "postgresql": {},
        "qdrant": {},
        "redis": {},
        "kg_files": {},
        "ingest_pipeline": {},
    }

    # --- PostgreSQL: row counts for memory tables + database size ---
    conn = _get_pg_connection()
    if conn:
        try:
            cur = conn.cursor()
            # Table row counts for memory-related tables (hardcoded list,
            # so the f-string interpolation below is not an injection risk).
            memory_tables = [
                "digestion_kg_entities",
                "digestion_processed_sessions",
                "genesis_ingest_tracker",
                "bloodstream_knowledge",
                "em_episodic_memories",
                "episodes",
            ]
            table_counts = {}
            for table in memory_tables:
                try:
                    cur.execute(f"SELECT COUNT(*) FROM {table}")
                    count = cur.fetchone()[0]
                    table_counts[table] = count
                except Exception:
                    # Table may not exist in this deployment. Roll back so
                    # subsequent queries still work — in psycopg2-style
                    # drivers a failed statement aborts the transaction and
                    # would otherwise fail every later query here.
                    try:
                        conn.rollback()
                    except Exception:
                        pass

            cur.execute("SELECT pg_database_size(current_database())")
            db_size = cur.fetchone()[0]

            stats["postgresql"] = {
                "status": "healthy",
                "db_size_mb": round(db_size / 1024 / 1024, 1),
                "table_counts": table_counts,
            }
            cur.close()
        except Exception as e:
            stats["postgresql"] = {"status": "error", "error": str(e)}
        finally:
            # Always release the connection; previously it leaked when an
            # exception fired after connect.
            try:
                conn.close()
            except Exception:
                pass
    else:
        stats["postgresql"] = {"status": "unreachable"}

    # --- Qdrant: per-collection vector counts ---
    qdrant_client = _get_qdrant_client()
    if qdrant_client:
        try:
            collections = qdrant_client.get_collections().collections
            col_stats = {}
            total_vectors = 0
            for col in collections:
                try:
                    col_info = qdrant_client.get_collection(col.name)
                    vcount = col_info.vectors_count or 0
                    col_stats[col.name] = vcount
                    total_vectors += vcount
                except Exception:
                    col_stats[col.name] = "unknown"

            stats["qdrant"] = {
                "status": "healthy",
                "collection_count": len(collections),
                "total_vectors": total_vectors,
                "collections": col_stats,
            }
        except Exception as e:
            stats["qdrant"] = {"status": "error", "error": str(e)}
    else:
        stats["qdrant"] = {"status": "unreachable"}

    # --- Redis: key counts and memory usage ---
    redis_client = _get_cache()
    if redis_client:
        try:
            info = redis_client.info("memory")
            keycount = redis_client.dbsize()
            # Count genesis-specific keys.
            # NOTE(review): KEYS blocks Redis on large keyspaces; switch to
            # SCAN if the genesis:* namespace grows significantly.
            genesis_keys = len(redis_client.keys("genesis:*"))
            stats["redis"] = {
                "status": "healthy",
                "total_keys": keycount,
                "genesis_keys": genesis_keys,
                "used_memory_mb": round(info.get("used_memory", 0) / 1024 / 1024, 1),
            }
        except Exception as e:
            stats["redis"] = {"status": "error", "error": str(e)}
    else:
        stats["redis"] = {"status": "unreachable"}

    # --- KG files: JSONL entity/axiom file counts ---
    kg_entities_dir = GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "entities"
    kg_axioms_dir = GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "axioms"
    if kg_entities_dir.exists():
        entity_files = list(kg_entities_dir.glob("*.jsonl"))
        entity_line_count = 0
        for f in entity_files:
            try:
                # One non-blank line per entity (JSONL convention).
                entity_line_count += sum(1 for line in f.read_text(encoding="utf-8").splitlines() if line.strip())
            except Exception:
                pass
        latest_file = max(entity_files, key=lambda f: f.stat().st_mtime, default=None)
        stats["kg_files"] = {
            "entity_files": len(entity_files),
            "total_entities": entity_line_count,
            "latest_file": latest_file.name if latest_file else None,
            "latest_mtime": datetime.fromtimestamp(latest_file.stat().st_mtime).isoformat() if latest_file else None,
        }
    if kg_axioms_dir.exists():
        axiom_files = list(kg_axioms_dir.glob("*.jsonl"))
        stats["kg_files"]["axiom_files"] = len(axiom_files)

    # --- Ingest pipeline: last entries of the ingest/health JSONL logs ---
    ingest_log = kg_entities_dir / "auto_ingest_log.jsonl" if kg_entities_dir.exists() else None
    health_log = kg_entities_dir / "memory_health_log.jsonl" if kg_entities_dir.exists() else None

    ingest_info = {"auto_ingest_log_exists": False, "health_monitor_log_exists": False}
    if ingest_log and ingest_log.exists():
        ingest_info["auto_ingest_log_exists"] = True
        try:
            lines = [l for l in ingest_log.read_text(encoding="utf-8").splitlines() if l.strip()]
            if lines:
                last = json.loads(lines[-1])
                ingest_info["last_ingest"] = last.get("timestamp", "")
                ingest_info["ingest_entry_count"] = len(lines)
        except Exception:
            pass  # best-effort: a corrupt log should not break stats
    if health_log and health_log.exists():
        ingest_info["health_monitor_log_exists"] = True
        try:
            lines = [l for l in health_log.read_text(encoding="utf-8").splitlines() if l.strip()]
            if lines:
                last = json.loads(lines[-1])
                ingest_info["last_health_check"] = last.get("timestamp", "")
                ingest_info["last_overall_status"] = last.get("overall", "")
        except Exception:
            pass  # best-effort: a corrupt log should not break stats

    stats["ingest_pipeline"] = ingest_info

    # --- Render the collected stats as a plain-text report ---
    output = "GENESIS MEMORY STATISTICS\n" + "=" * 40 + "\n\n"
    output += f"Timestamp: {stats['timestamp']}\n\n"

    output += "POSTGRESQL\n"
    pg = stats["postgresql"]
    output += f"  Status: {pg.get('status', 'unknown')}\n"
    if "db_size_mb" in pg:
        output += f"  DB Size: {pg['db_size_mb']} MB\n"
    if "table_counts" in pg:
        for table, count in pg["table_counts"].items():
            output += f"  {table}: {count} rows\n"
    output += "\n"

    output += "QDRANT\n"
    qd = stats["qdrant"]
    output += f"  Status: {qd.get('status', 'unknown')}\n"
    if "total_vectors" in qd:
        output += f"  Total Vectors: {qd['total_vectors']}\n"
    if "collections" in qd:
        for col, cnt in qd["collections"].items():
            output += f"  {col}: {cnt} vectors\n"
    output += "\n"

    output += "REDIS\n"
    rd = stats["redis"]
    output += f"  Status: {rd.get('status', 'unknown')}\n"
    if "total_keys" in rd:
        output += f"  Total Keys: {rd['total_keys']} ({rd.get('genesis_keys', 0)} genesis)\n"
        output += f"  Memory: {rd.get('used_memory_mb', 0)} MB\n"
    output += "\n"

    output += "KG FILES\n"
    kf = stats["kg_files"]
    output += f"  Entity Files: {kf.get('entity_files', 0)}\n"
    output += f"  Total Entities: {kf.get('total_entities', 0)}\n"
    output += f"  Axiom Files: {kf.get('axiom_files', 0)}\n"
    if kf.get("latest_file"):
        output += f"  Latest: {kf['latest_file']} ({kf.get('latest_mtime', '')[:10]})\n"
    output += "\n"

    output += "INGEST PIPELINE\n"
    ip = stats["ingest_pipeline"]
    output += f"  Auto-Ingest Log: {'active' if ip.get('auto_ingest_log_exists') else 'not running'}\n"
    if ip.get("last_ingest"):
        output += f"  Last Ingest: {ip['last_ingest'][:19]}\n"
    output += f"  Health Monitor: {'active' if ip.get('health_monitor_log_exists') else 'not running'}\n"
    if ip.get("last_health_check"):
        output += f"  Last Health Check: {ip['last_health_check'][:19]} [{ip.get('last_overall_status', '?')}]\n"

    _cache_set(cache_key, output, ttl=120)
    return output


# ---------------------------------------------------------------------------
# Resources
# ---------------------------------------------------------------------------
@mcp.resource("genesis://memory/current")
def resource_memory_md() -> str:
    """Current MEMORY.md contents - the persistent Genesis state."""
    # NOTE(review): if get_memory_context is registered via @mcp.tool, some
    # FastMCP versions return a Tool object from the decorator, making this
    # direct call fail (would need `.fn`) — confirm against the installed
    # fastmcp version.
    return get_memory_context()


@mcp.resource("genesis://status/war-room")
def resource_war_room() -> str:
    """Current war room status."""
    # NOTE(review): if get_war_room is a @mcp.tool, direct invocation may
    # require `.fn` on FastMCP versions where the decorator returns a Tool
    # object — verify.
    return get_war_room()


@mcp.resource("genesis://architecture/summary")
def resource_architecture() -> str:
    """Genesis architecture summary."""
    # NOTE(review): get_architecture_summary is decorated with @mcp.tool; on
    # FastMCP versions where that decorator returns a Tool object rather than
    # the bare function, this direct call may need `.fn` — verify.
    return get_architecture_summary()


# ---------------------------------------------------------------------------
# Entry Point
# ---------------------------------------------------------------------------
def main():
    """Log a startup banner and run the MCP server on the configured transport."""
    banner = "=" * 60
    logger.info(banner)
    logger.info("Genesis Voice Bridge MCP Server")
    logger.info(banner)
    logger.info("Transport: %s", TRANSPORT)
    logger.info("Host: %s", HOST)
    logger.info("Port: %s", PORT)
    logger.info("Auth: %s", "enabled" if AUTH_TOKEN else "disabled")
    logger.info(banner)

    # stdio transport takes no network binding; the others need host/port.
    run_kwargs = {"transport": TRANSPORT}
    if TRANSPORT != "stdio":
        run_kwargs["host"] = HOST
        run_kwargs["port"] = PORT
    mcp.run(**run_kwargs)


# ---------------------------------------------------------------------------
# ASGI App Factory
# ---------------------------------------------------------------------------
def create_app(transport: str = "sse"):
    """
    Create the ASGI app with middleware for production deployment.

    For SSE mode (Telnyx): Deploy with `uvicorn server:app` (default)
    For HTTP mode: Deploy with `uvicorn server:http_app`

    Args:
        transport: Transport protocol - "sse" or "http" (default "sse" for Telnyx)
    """
    cors = Middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_methods=["GET", "POST", "DELETE", "OPTIONS"],
        allow_headers=[
            "mcp-protocol-version",
            "mcp-session-id",
            "Authorization",
            "Content-Type",
        ],
        expose_headers=["mcp-session-id"],
    )
    inner = mcp.http_app(transport=transport, middleware=[cors])
    # Only wrap with bearer-token auth when a token is configured.
    if not AUTH_TOKEN:
        return inner
    return BearerTokenMiddleware(inner, AUTH_TOKEN)


# Default app for uvicorn (SSE transport for Telnyx compatibility)
app = create_app(transport="sse")

# Explicit aliases
sse_app = app  # same object as `app` — SSE is the default transport
http_app = create_app(transport="http")  # separate app for streamable HTTP


if __name__ == "__main__":
    main()
