"""
Genesis Memory Hub - Unified query across ALL memory systems.
One function to search everything. The missing bridge.

Memory Systems:
  1. Supermemory (genesis-kinan container) - semantic memory via API
  2. Graphiti (Sovereign Memory MCP) - knowledge graph nodes + facts
  3. File-based KG - KNOWLEDGE_GRAPH/ entities, axioms, relationships
  4. Qdrant - vector similarity search across embedded content
  5. PostgreSQL - structured search across memory_bloodstream, ponte_goldmine, etc.

Usage:
    # Async
    results = await query_all("voice agent pricing")

    # Sync
    results = query_sync("voice agent pricing")

    # Class-based
    hub = GenesisMemoryHub()
    await hub.connect()
    await hub.store("voice agent works well", {"category": "voice"})
    results = await hub.search("voice agent pricing")

    # CLI
    python3 core/genesis_memory_hub.py "voice agent pricing"
"""

import asyncio
import json
import os
import re
import sys
import time
import uuid
import hashlib
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Dict, Optional, Set, Any, Tuple

import httpx

# ---------------------------------------------------------------------------
# Paths & constants
# ---------------------------------------------------------------------------
# Repo root; overridable via env so the hub works outside the default mount.
GENESIS_ROOT = os.environ.get("GENESIS_ROOT", "/mnt/e/genesis-system")
KG_DIR = Path(GENESIS_ROOT) / "KNOWLEDGE_GRAPH"  # file-based knowledge graph root

# Supermemory
# SECURITY NOTE(review): a live-looking API key is hard-coded as the env-var
# fallback below. It should be rotated and removed from source control;
# prefer failing fast when SUPERMEMORY_API_KEY is unset.
SUPERMEMORY_API_KEY = os.environ.get(
    "SUPERMEMORY_API_KEY",
    "sm_EWRhbQPEodMHkJd8Vbshpx_wCauANQAwJFvFfTwTTrujWzHTQajuJPRJLFwavESILxQZpmDiqfIbDAAfGCffQQb",
)
SUPERMEMORY_CONTAINER = os.environ.get("SUPERMEMORY_CONTAINER_TAG", "genesis-kinan")

# Graphiti MCP (Sovereign Memory)
GRAPHITI_URL = os.environ.get("GRAPHITI_MCP_URL", "http://152.53.201.221:8001/mcp")
GRAPHITI_GROUP = os.environ.get("GRAPHITI_GROUP_ID", "genesis")

# Timeouts
SYSTEM_TIMEOUT = 10.0  # seconds per backend

# Elestio config path
# Makes the elestio_config module importable (also re-inserted defensively
# inside several backend functions below).
sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))

log = logging.getLogger("genesis_memory_hub")


# ---------------------------------------------------------------------------
# Data model
# ---------------------------------------------------------------------------
@dataclass
class MemoryResult:
    """One hit returned by any of the underlying memory backends."""
    source: str  # backend identifier, e.g. "supermemory" or "qdrant:<collection>"
    content: str  # matched text (may be truncated by the producing backend)
    relevance: float = 0.5  # ranking score in [0.0, 1.0]
    metadata: dict = field(default_factory=dict)  # backend-specific extras

    def __repr__(self) -> str:
        # Flatten newlines and preview only the first 80 chars of content.
        flattened = self.content.replace("\n", " ")
        return "MemoryResult(source=%r, relevance=%.2f, content=%r...)" % (
            self.source,
            self.relevance,
            flattened[:80],
        )


# ---------------------------------------------------------------------------
# System 1: Supermemory
# ---------------------------------------------------------------------------
async def search_supermemory(query: str, limit: int = 5) -> List[MemoryResult]:
    """Query the Supermemory v3 search API (same endpoint search.sh uses).

    Joins each hit's chunk texts into one result; on any failure a single
    zero-relevance [ERROR] result is returned instead of raising.
    """
    hits: List[MemoryResult] = []
    try:
        async with httpx.AsyncClient(timeout=SYSTEM_TIMEOUT) as client:
            resp = await client.post(
                "https://api.supermemory.ai/v3/search",
                headers={
                    "Authorization": f"Bearer {SUPERMEMORY_API_KEY}",
                    "Content-Type": "application/json",
                },
                json={
                    "query": query,
                    "limit": limit,
                    "containerTags": [SUPERMEMORY_CONTAINER],
                },
            )
            resp.raise_for_status()
            for item in resp.json().get("results", []):
                # An item's content is split into chunks; join the non-empty ones.
                text = "\n".join(
                    chunk.get("content", "")
                    for chunk in item.get("chunks", [])
                    if chunk.get("content")
                ).strip()
                if not text:
                    continue
                hits.append(
                    MemoryResult(
                        source="supermemory",
                        content=text,
                        relevance=min(float(item.get("score", 0.5)), 1.0),
                        metadata={
                            "createdAt": item.get("createdAt"),
                            "id": item.get("id"),
                        },
                    )
                )
    except Exception as e:
        hits.append(
            MemoryResult(
                source="supermemory",
                content=f"[ERROR] Supermemory search failed: {e}",
                relevance=0.0,
                metadata={"error": str(e)},
            )
        )
    return hits


async def store_supermemory(content: str, metadata: Dict) -> Dict[str, Any]:
    """Persist *content* to Supermemory under the configured container tag.

    Returns {"supermemory": True, "id": ...} on success, otherwise
    {"supermemory": False, "error": ...}.
    """
    try:
        body: Dict[str, Any] = {
            "content": content,
            "containerTags": [SUPERMEMORY_CONTAINER],
        }
        if metadata:
            body["metadata"] = metadata
        async with httpx.AsyncClient(timeout=SYSTEM_TIMEOUT) as client:
            resp = await client.post(
                "https://api.supermemory.ai/v3/memories",
                headers={
                    "Authorization": f"Bearer {SUPERMEMORY_API_KEY}",
                    "Content-Type": "application/json",
                },
                json=body,
            )
            resp.raise_for_status()
            return {"supermemory": True, "id": resp.json().get("id")}
    except Exception as e:
        return {"supermemory": False, "error": str(e)}


# ---------------------------------------------------------------------------
# System 2: Graphiti MCP
# ---------------------------------------------------------------------------
async def _graphiti_call(
    session_id: str, method: str, tool_name: str, arguments: dict
) -> dict:
    """Make a single MCP tool call to Graphiti and return the JSON-RPC reply.

    The Accept header invites either an SSE stream or plain JSON, but the
    original code only parsed SSE "data:" lines and silently dropped plain
    JSON bodies; this version falls back to resp.json(). Returns {} when no
    parseable payload is found.
    """
    payload = {
        "jsonrpc": "2.0",
        "id": session_id,
        "method": method,
        "params": {"name": tool_name, "arguments": arguments},
    }
    headers = {
        "Accept": "text/event-stream, application/json",
        "Content-Type": "application/json",
        "mcp-session-id": session_id,
    }
    async with httpx.AsyncClient(timeout=SYSTEM_TIMEOUT) as client:
        resp = await client.post(GRAPHITI_URL, json=payload, headers=headers)
        # SSE framing: the first "data:" line carries the JSON-RPC response.
        for line in resp.text.split("\n"):
            if line.startswith("data:"):
                return json.loads(line[5:].strip())
        # Fallback: server answered with a plain JSON body instead of SSE.
        try:
            return resp.json()
        except Exception:
            return {}


async def search_graphiti(query: str, limit: int = 5) -> List[MemoryResult]:
    """Search the Graphiti knowledge graph for matching nodes and facts.

    Opens a fresh MCP session, then queries the search_nodes and search_facts
    tools with the same arguments (previously two near-identical copy-pasted
    blocks; now one loop). Each text item becomes a MemoryResult at a fixed
    relevance of 0.7. On any failure a single zero-relevance [ERROR] result
    is returned instead.
    """
    results: List[MemoryResult] = []
    try:
        session_id = f"hub-{uuid.uuid4().hex[:8]}"
        headers = {
            "Accept": "text/event-stream, application/json",
            "Content-Type": "application/json",
            "mcp-session-id": session_id,
        }
        init_payload = {
            "jsonrpc": "2.0",
            "id": "hub-init",  # was a needless f-string with no placeholders
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "clientInfo": {"name": "genesis-memory-hub", "version": "1.0.0"},
                "capabilities": {},
            },
        }
        # Initialize the MCP session before issuing tool calls.
        async with httpx.AsyncClient(timeout=SYSTEM_TIMEOUT) as client:
            await client.post(GRAPHITI_URL, json=init_payload, headers=headers)

        args = {"query": query, "group_ids": [GRAPHITI_GROUP], "max_results": limit}
        # Nodes first, then facts — preserves the original result ordering.
        for tool in ("search_nodes", "search_facts"):
            resp = await _graphiti_call(session_id, "tools/call", tool, args)
            if not resp:
                continue
            for item in resp.get("result", {}).get("content", []):
                text = item.get("text", "")
                if text:
                    results.append(
                        MemoryResult(
                            source="graphiti",
                            content=text,
                            relevance=0.7,
                            metadata={"group": GRAPHITI_GROUP},
                        )
                    )
    except Exception as e:
        results.append(
            MemoryResult(
                source="graphiti",
                content=f"[ERROR] Graphiti search failed: {e}",
                relevance=0.0,
                metadata={"error": str(e)},
            )
        )
    return results


# ---------------------------------------------------------------------------
# System 3: File-based Knowledge Graph
# ---------------------------------------------------------------------------
def _keyword_score(text: str, keywords: List[str]) -> float:
    """Simple keyword relevance: fraction of query keywords found in text."""
    lower = text.lower()
    found = sum(1 for kw in keywords if kw in lower)
    return found / len(keywords) if keywords else 0.0


async def search_file_kg(query: str, limit: int = 5) -> List[MemoryResult]:
    """Keyword-search the file-based Knowledge Graph (entities + axioms).

    Scans KNOWLEDGE_GRAPH/entities/*.json and KNOWLEDGE_GRAPH/axioms/*.jsonl,
    scoring each item by the fraction of query keywords (words longer than 2
    chars) present in its text. Unreadable files and malformed lines are
    skipped (deliberate best-effort). Returns the top *limit* by score.
    """
    results: List[MemoryResult] = []
    keywords = [w.lower() for w in query.split() if len(w) > 2]

    # Entities: one JSON document per file.
    entity_dir = KG_DIR / "entities"
    if entity_dir.exists():
        for fpath in entity_dir.glob("*.json"):
            try:
                text = fpath.read_text(encoding="utf-8")
                score = _keyword_score(text, keywords)
                if score > 0:
                    try:
                        data = json.loads(text)
                        snippet = (
                            json.dumps(data)[:300] if isinstance(data, dict) else str(data)[:300]
                        )
                    except Exception:
                        snippet = text[:300]  # not valid JSON: show raw prefix
                    results.append(
                        MemoryResult(
                            source="file_kg_entity",
                            content=snippet,
                            relevance=round(score, 2),
                            metadata={"path": str(fpath)},
                        )
                    )
            except Exception:
                pass  # unreadable file: skip

    # Axioms: JSONL, one axiom object per line.
    axiom_dir = KG_DIR / "axioms"
    if axiom_dir.exists():
        for fpath in axiom_dir.glob("*.jsonl"):
            try:
                with open(fpath, "r", encoding="utf-8") as f:
                    for line_num, line in enumerate(f):
                        line = line.strip()
                        if not line:
                            continue
                        try:
                            axiom = json.loads(line)
                            # Fix: a scalar/list line previously raised
                            # AttributeError on .get() and aborted the whole
                            # file via the outer except; skip it instead.
                            if not isinstance(axiom, dict):
                                continue
                            raw = (
                                axiom.get("content")
                                or axiom.get("text")
                                or axiom
                            )
                            # Fix: coerce to str — a non-string "content" value
                            # would otherwise crash _keyword_score and abort
                            # the rest of the file.
                            content = raw if isinstance(raw, str) else str(raw)
                            score = _keyword_score(content, keywords)
                            if score > 0:
                                axiom_id = axiom.get("id", f"line-{line_num}")
                                results.append(
                                    MemoryResult(
                                        source="file_kg_axiom",
                                        content=content[:400],
                                        relevance=round(score, 2),
                                        metadata={
                                            "path": str(fpath),
                                            "id": axiom_id,
                                        },
                                    )
                                )
                        except json.JSONDecodeError:
                            pass  # malformed line: skip, keep scanning
            except Exception:
                pass  # unreadable file: skip

    # Highest relevance first, capped at limit.
    results.sort(key=lambda r: r.relevance, reverse=True)
    return results[:limit]


# ---------------------------------------------------------------------------
# System 4: Qdrant vector search
# ---------------------------------------------------------------------------
async def search_qdrant(query: str, limit: int = 5) -> List[MemoryResult]:
    """Vector similarity search across all compatible Qdrant collections.

    Embeds the query locally with fastembed (BAAI/bge-small-en-v1.5) and
    queries every non-empty collection whose configured vector size matches
    the query embedding. Failures are reported as zero-relevance [ERROR] or
    [SKIP] results rather than raised.
    (Fix: removed an unused ``qdrant_client.models`` import.)
    """
    results: List[MemoryResult] = []
    try:
        from qdrant_client import QdrantClient

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        config = elestio_config.QdrantConfig()
        client = QdrantClient(
            url=config.url,
            api_key=config.api_key,
            timeout=SYSTEM_TIMEOUT,
        )

        # Generate embedding with fastembed (no API cost)
        try:
            from fastembed import TextEmbedding
            embedding_model = TextEmbedding("BAAI/bge-small-en-v1.5")
            query_vector = list(embedding_model.embed([query]))[0].tolist()
        except ImportError:
            results.append(
                MemoryResult(
                    source="qdrant",
                    content="[SKIP] fastembed not installed -- run: pip3 install fastembed --break-system-packages",
                    relevance=0.0,
                    metadata={"error": "fastembed not installed"},
                )
            )
            return results

        # Only collections that are non-empty AND whose vector size matches
        # the embedding; configs without a .size attribute (e.g. named
        # vectors) are skipped.
        searchable_collections = []
        for col in client.get_collections().collections:
            try:
                col_info = client.get_collection(col.name)
                if col_info.points_count and col_info.points_count > 0:
                    vec_config = col_info.config.params.vectors
                    if hasattr(vec_config, "size"):
                        vec_size = vec_config.size
                    else:
                        continue
                    if len(query_vector) == vec_size:
                        searchable_collections.append(col.name)
            except Exception:
                pass  # best effort: skip collections we cannot inspect

        for collection_name in searchable_collections:
            try:
                search_results = client.query_points(
                    collection_name=collection_name,
                    query=query_vector,
                    limit=min(limit, 3),  # cap per-collection hits
                )
                for point in search_results.points:
                    payload = point.payload or {}
                    # Prefer common text fields; fall back to the raw payload.
                    text = (
                        payload.get("text")
                        or payload.get("content")
                        or payload.get("chunk_text")
                        or json.dumps(payload)
                    )
                    score = round(float(point.score) if hasattr(point, "score") else 0.5, 3)
                    results.append(
                        MemoryResult(
                            source=f"qdrant:{collection_name}",
                            content=str(text)[:500],
                            relevance=score,
                            metadata={
                                "path": payload.get("path"),
                                "id": str(point.id),
                            },
                        )
                    )
            except Exception:
                pass  # best effort per collection

    except Exception as e:
        results.append(
            MemoryResult(
                source="qdrant",
                content=f"[ERROR] Qdrant search failed: {e}",
                relevance=0.0,
                metadata={"error": str(e)},
            )
        )
    return results


async def store_qdrant(
    content: str, metadata: Dict, doc_id: Optional[str] = None
) -> Dict[str, Any]:
    """Store content in Qdrant with a locally computed embedding.

    Args:
        content:  Text to embed and store.
        metadata: Extra payload fields merged alongside "content". May now be
                  None/empty (the original ``**metadata`` crashed on None,
                  unlike store_supermemory which guards falsy metadata).
        doc_id:   Explicit point id; defaults to a deterministic UUIDv5 of
                  the content, so re-storing identical text upserts the same
                  point instead of duplicating it.

    Returns:
        {"qdrant": True, "id": ..., "collection": ...} on success,
        {"qdrant": False, "error": ...} on failure.
    """
    try:
        from qdrant_client import QdrantClient
        import qdrant_client.models as qmodels
        from fastembed import TextEmbedding

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        config = elestio_config.QdrantConfig()
        client = QdrantClient(url=config.url, api_key=config.api_key, timeout=SYSTEM_TIMEOUT)

        embedding_model = TextEmbedding("BAAI/bge-small-en-v1.5")
        vector = list(embedding_model.embed([content]))[0].tolist()

        if doc_id is None:
            # Deterministic id: identical content always maps to one point.
            namespace = uuid.UUID("e2a6d7f8-b3c4-4d5e-8f90-a1b2c3d4e5f6")
            doc_id = str(uuid.uuid5(namespace, content))

        collection_name = config.collection_name or "genesis_vectors"
        # Ensure collection exists; create on first use with matching size.
        try:
            client.get_collection(collection_name)
        except Exception:
            client.create_collection(
                collection_name=collection_name,
                vectors_config=qmodels.VectorParams(
                    size=len(vector), distance=qmodels.Distance.COSINE
                ),
            )

        # Fix: guard against None/falsy metadata before unpacking.
        payload = {"content": content, **(metadata or {})}
        client.upsert(
            collection_name=collection_name,
            points=[qmodels.PointStruct(id=doc_id, vector=vector, payload=payload)],
        )
        return {"qdrant": True, "id": doc_id, "collection": collection_name}
    except Exception as e:
        return {"qdrant": False, "error": str(e)}


async def delete_qdrant(doc_id: str) -> Dict[str, Any]:
    """Remove the point with *doc_id* from every Qdrant collection.

    Best effort: a collection where the delete call raises is skipped. The
    returned count is the number of collections where the call succeeded
    (the point may not have existed in all of them).
    """
    removed_count = 0
    try:
        from qdrant_client import QdrantClient
        import qdrant_client.models as qmodels

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        config = elestio_config.QdrantConfig()
        client = QdrantClient(url=config.url, api_key=config.api_key, timeout=SYSTEM_TIMEOUT)

        selector = qmodels.PointIdsList(points=[doc_id])
        for collection in client.get_collections().collections:
            try:
                client.delete(
                    collection_name=collection.name,
                    points_selector=selector,
                )
                removed_count += 1
            except Exception:
                pass
        return {"qdrant": True, "deleted_from_collections": removed_count}
    except Exception as e:
        return {"qdrant": False, "error": str(e)}


# ---------------------------------------------------------------------------
# System 5: PostgreSQL
# ---------------------------------------------------------------------------
async def search_postgres(query: str, limit: int = 5) -> List[MemoryResult]:
    """Substring (ILIKE) search across key PostgreSQL tables.

    Queries three tables independently -- memory_bloodstream, ponte_goldmine
    and technologies -- so a failure in one (e.g. a missing table) does not
    stop the others. Connection-level failures produce a single zero-relevance
    [ERROR] result.
    (Fixes: removed the unused ``keyword_conditions`` local; the connection is
    now closed in a finally block so it cannot leak on unexpected errors.)
    """
    results: List[MemoryResult] = []
    try:
        import psycopg2

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        conn = psycopg2.connect(**elestio_config.PostgresConfig.get_connection_params())
        try:
            conn.set_session(autocommit=True)
            cur = conn.cursor()

            search_pattern = "%" + query + "%"
            keywords = [w.lower() for w in query.split() if len(w) > 2]

            # memory_bloodstream: episodic memory rows, newest first.
            try:
                cur.execute(
                    """
                    SELECT content, memory_type, source, created_at
                    FROM memory_bloodstream
                    WHERE content ILIKE %s
                    ORDER BY created_at DESC
                    LIMIT %s
                    """,
                    (search_pattern, limit),
                )
                for row in cur.fetchall():
                    content, memory_type, source, created_at = row
                    results.append(
                        MemoryResult(
                            source="postgres:memory_bloodstream",
                            content=str(content)[:500],
                            # keyword overlap plus a 0.3 base boost, capped at 1.0
                            relevance=round(min(_keyword_score(str(content), keywords) + 0.3, 1.0), 2),
                            metadata={"memory_type": memory_type, "source": source},
                        )
                    )
            except Exception:
                pass

            # ponte_goldmine: wisdom/quotes, ranked by stored confidence.
            try:
                cur.execute(
                    """
                    SELECT quote, context, category, source_title, confidence
                    FROM ponte_goldmine
                    WHERE quote ILIKE %s OR context ILIKE %s OR category ILIKE %s
                    ORDER BY confidence DESC NULLS LAST
                    LIMIT %s
                    """,
                    (search_pattern, search_pattern, search_pattern, limit),
                )
                for row in cur.fetchall():
                    quote, context, category, source_title, confidence = row
                    content = str(quote)
                    if context:
                        content += f"\nContext: {context}"
                    results.append(
                        MemoryResult(
                            source="postgres:ponte_goldmine",
                            content=content[:500],
                            relevance=round(float(confidence or 0.5), 2),
                            metadata={"category": category, "source_title": source_title},
                        )
                    )
            except Exception:
                pass

            # technologies: priority_score divided by 10 to map an apparent
            # 0-10 scale into [0, 1] (assumption -- confirm against schema).
            try:
                cur.execute(
                    """
                    SELECT name, description, category, priority_score, source
                    FROM technologies
                    WHERE name ILIKE %s OR description ILIKE %s OR category ILIKE %s
                    ORDER BY priority_score DESC NULLS LAST
                    LIMIT %s
                    """,
                    (search_pattern, search_pattern, search_pattern, limit),
                )
                for row in cur.fetchall():
                    name, description, category, priority_score, source = row
                    content = f"{name}: {description or ''}"
                    results.append(
                        MemoryResult(
                            source="postgres:technologies",
                            content=content[:500],
                            relevance=round(min(float(priority_score or 0.5) / 10.0, 1.0), 2),
                            metadata={"category": category, "source": source},
                        )
                    )
            except Exception:
                pass

            cur.close()
        finally:
            # Close even if an unexpected error escapes the per-table blocks.
            conn.close()

    except Exception as e:
        results.append(
            MemoryResult(
                source="postgres",
                content=f"[ERROR] PostgreSQL search failed: {e}",
                relevance=0.0,
                metadata={"error": str(e)},
            )
        )
    return results


async def store_postgres(content: str, metadata: Dict) -> Dict[str, Any]:
    """Insert *content* into the PostgreSQL memory_bloodstream table.

    The metadata keys "type" and "source" map to dedicated columns; all
    remaining keys are serialized into the JSON metadata column.
    (Fix: the connection is now closed in a finally block, so a failing
    INSERT no longer leaks it.)

    Returns:
        {"postgres": True, "id": <uuid>} on success,
        {"postgres": False, "error": ...} on failure.
    """
    try:
        import psycopg2

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        conn = psycopg2.connect(**elestio_config.PostgresConfig.get_connection_params())
        try:
            cur = conn.cursor()

            memory_id = str(uuid.uuid4())
            memory_type = metadata.get("type", "hub_store")
            source = metadata.get("source", "genesis_memory_hub")
            extra_meta = json.dumps({k: v for k, v in metadata.items() if k not in ("type", "source")})

            cur.execute(
                """
                INSERT INTO memory_bloodstream (id, content, memory_type, source, metadata, created_at)
                VALUES (%s, %s, %s, %s, %s, NOW())
                ON CONFLICT (id) DO UPDATE SET content=EXCLUDED.content, metadata=EXCLUDED.metadata
                """,
                (memory_id, content, memory_type, source, extra_meta),
            )
            conn.commit()
            cur.close()
        finally:
            conn.close()
        return {"postgres": True, "id": memory_id}
    except Exception as e:
        return {"postgres": False, "error": str(e)}


async def delete_postgres(doc_id: str) -> Dict[str, Any]:
    """Delete a record from memory_bloodstream by ID.

    (Fix: the connection is now closed in a finally block, so a failing
    DELETE no longer leaks it.)

    Returns:
        {"postgres": True, "deleted": <rowcount>} on success (0 when no row
        matched), {"postgres": False, "error": ...} on failure.
    """
    try:
        import psycopg2

        sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
        import elestio_config

        conn = psycopg2.connect(**elestio_config.PostgresConfig.get_connection_params())
        try:
            cur = conn.cursor()
            cur.execute("DELETE FROM memory_bloodstream WHERE id = %s", (doc_id,))
            deleted = cur.rowcount
            conn.commit()
            cur.close()
        finally:
            conn.close()
        return {"postgres": True, "deleted": deleted}
    except Exception as e:
        return {"postgres": False, "error": str(e)}


# ---------------------------------------------------------------------------
# Redis fast recall
# ---------------------------------------------------------------------------
def _get_redis_client():
    """Build a Redis client from the Elestio connection settings."""
    import redis as redis_lib

    sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
    import elestio_config

    return redis_lib.Redis(**elestio_config.RedisConfig.get_connection_params())


def recall_redis(key: str) -> Optional[str]:
    """Fast Redis recall by key (gmh: namespace). Returns None if missing.

    Fix: redis-py returns bytes unless the client was built with
    decode_responses=True; the original ``str(value)`` produced the repr
    "b'...'" instead of the stored text. Bytes are now decoded explicitly.
    """
    try:
        r = _get_redis_client()
        value = r.get(f"gmh:{key}")
        if value is None:
            return None
        if isinstance(value, bytes):
            return value.decode("utf-8", errors="replace")
        return str(value)
    except Exception as e:
        log.warning(f"Redis recall failed for key={key!r}: {e}")
        return None


def store_redis(key: str, value: str, ttl_seconds: int = 3600) -> bool:
    """Write a value under the gmh: namespace with a TTL; True on success."""
    try:
        _get_redis_client().setex(f"gmh:{key}", ttl_seconds, value)
    except Exception as e:
        log.warning(f"Redis store failed for key={key!r}: {e}")
        return False
    return True


def delete_redis(key: str) -> bool:
    """Remove a gmh:-namespaced key. True if the key existed."""
    try:
        removed = _get_redis_client().delete(f"gmh:{key}")
    except Exception as e:
        log.warning(f"Redis delete failed for key={key!r}: {e}")
        return False
    return removed > 0


# ---------------------------------------------------------------------------
# System registry
# ---------------------------------------------------------------------------
# Maps system name -> async search callable with signature (query, limit).
# query_all() fans out across this registry; add new backends here.
ALL_SYSTEMS = {
    "supermemory": search_supermemory,
    "graphiti": search_graphiti,
    "file_kg": search_file_kg,
    "qdrant": search_qdrant,
    "postgres": search_postgres,
}


# ---------------------------------------------------------------------------
# Core query functions (original interface preserved)
# ---------------------------------------------------------------------------
async def query_all(
    query: str,
    max_results: int = 10,
    systems: Optional[List[str]] = None,
    verbose: bool = False,
) -> List[MemoryResult]:
    """
    Fan the query out to all Genesis memory systems concurrently.

    Results from every backend are merged, deduplicated by a lowercase
    120-char content prefix, and sorted by relevance (descending).

    Args:
        query: Natural language search query
        max_results: Maximum total results to return
        systems: Which systems to query (default: all). Options:
                 supermemory, graphiti, file_kg, qdrant, postgres
        verbose: If True, keeps [ERROR]/[SKIP] entries and appends a _meta
                 timing result
    """
    selected = systems or list(ALL_SYSTEMS.keys())
    valid = [name for name in selected if name in ALL_SYSTEMS]

    started = time.time()
    outcomes = await asyncio.gather(
        *(ALL_SYSTEMS[name](query, max_results) for name in valid),
        return_exceptions=True,
    )
    elapsed = time.time() - started

    merged: List[MemoryResult] = []
    for name, outcome in zip(valid, outcomes):
        if isinstance(outcome, Exception):
            merged.append(
                MemoryResult(
                    source=name,
                    content=f"[ERROR] {name} failed: {outcome}",
                    relevance=0.0,
                    metadata={"error": str(outcome)},
                )
            )
        elif isinstance(outcome, list):
            merged.extend(outcome)

    # Deduplicate by content prefix; error/skip markers bypass dedup and are
    # kept only in verbose mode.
    seen: Set[str] = set()
    deduped: List[MemoryResult] = []
    for item in merged:
        fingerprint = item.content.strip().lower()[:120]
        if fingerprint.startswith(("[error]", "[skip]")):
            if verbose:
                deduped.append(item)
        elif fingerprint not in seen:
            seen.add(fingerprint)
            deduped.append(item)

    deduped.sort(key=lambda item: item.relevance, reverse=True)

    if verbose:
        deduped.append(
            MemoryResult(
                source="_meta",
                content=f"Query completed in {elapsed:.2f}s across {len(selected)} systems",
                relevance=0.0,
                metadata={"elapsed": elapsed, "systems": selected},
            )
        )

    return deduped[:max_results]


def query_sync(
    query: str,
    max_results: int = 10,
    systems: Optional[List[str]] = None,
    verbose: bool = False,
) -> List[MemoryResult]:
    """Blocking wrapper around query_all (spins up its own event loop)."""
    coro = query_all(query, max_results=max_results, systems=systems, verbose=verbose)
    return asyncio.run(coro)


def quick_search(query: str, top_n: int = 5) -> str:
    """
    Search and return a formatted text block ready for agent context.

    Usage:
        context = quick_search("voice agent pricing")
        # Returns formatted text ready to paste into a prompt
    """
    results = query_sync(query, max_results=top_n)
    if not results:
        return f"No memories found for: {query}"

    out = [f"=== Genesis Memory Search: '{query}' ===\n"]
    for idx, hit in enumerate(results):
        # Error/skip markers are noise in a prompt; drop them.
        if hit.content.startswith(("[ERROR]", "[SKIP]")):
            continue
        out.append(
            f"{idx + 1}. [{hit.source}] (relevance: {hit.relevance:.2f})\n"
            f"   {hit.content[:200]}"
        )
        if hit.metadata.get("path"):
            out.append(f"   File: {hit.metadata['path']}")
        out.append("")

    return "\n".join(out)


def search_by_system(query: str, system: str, limit: int = 10) -> List[MemoryResult]:
    """Run the query against exactly one backend (see ALL_SYSTEMS for names)."""
    return query_sync(query, max_results=limit, systems=[system])


def _format_result(r: MemoryResult, index: int) -> str:
    """Format a single result for CLI display."""
    lines = [
        f"  {index}. [{r.source}] (relevance: {r.relevance:.2f})",
        f"     {r.content.replace(chr(10), chr(10) + '     ')[:300]}",
    ]
    if r.metadata.get("path"):
        lines.append(f"     File: {r.metadata['path']}")
    if r.metadata.get("category"):
        lines.append(f"     Category: {r.metadata['category']}")
    return "\n".join(lines)


# ---------------------------------------------------------------------------
# GenesisMemoryHub class (class-based interface)
# ---------------------------------------------------------------------------
class GenesisMemoryHub:
    """
    Class-based interface for the Genesis Bloodstream Memory Architecture.

    Unifies 5 backends:
      - PostgreSQL  (Elestio) — episodic + structured memory
      - Qdrant      (Elestio) — vector/semantic search
      - Redis       (Elestio) — fast working memory / recall
      - Supermemory (API)     — semantic long-term memory
      - File-based KG         — axioms, entities, relationships (local)

    Usage:
        hub = GenesisMemoryHub()
        await hub.connect()
        await hub.store("pricing is $497/month", {"category": "pricing"})
        results = await hub.search("what is the price?")
        value = hub.recall("last_session_id")
    """

    def __init__(self):
        self.log = logging.getLogger("GenesisMemoryHub")
        self._connected = False
        # Per-backend health flags; populated by connect().
        self._pg_ok = False
        self._qdrant_ok = False
        self._redis_ok = False
        self._supermemory_ok = False

    async def connect(self) -> Dict[str, bool]:
        """
        Connect and verify all 5 backends.

        Returns:
            Health status dict: True on success, an "ERROR: ..." string on failure.
        """
        status = await self.health_check()
        # health_check() stores an "ERROR: ..." string on failure, which is
        # truthy — compare with `is True` so errors are not counted as healthy.
        self._pg_ok = status.get("postgres") is True
        self._qdrant_ok = status.get("qdrant") is True
        self._redis_ok = status.get("redis") is True
        self._supermemory_ok = status.get("supermemory") is True
        self._connected = True

        connected = sum(1 for v in status.values() if v is True)
        self.log.info(f"GenesisMemoryHub connected: {connected}/{len(status)} backends online")
        return status

    async def store(
        self,
        content: str,
        metadata: Optional[Dict] = None,
        backends: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Store content to all backends (or specified subset).

        Args:
            content:  Text content to store
            metadata: Optional dict of metadata (type, source, category, etc.)
            backends: Which backends to store to. Default: postgres + qdrant + supermemory

        Returns:
            Dict with results per backend; failed backends get
            {"error": <message>, "stored": False} instead of raising.
        """
        if metadata is None:
            metadata = {}
        if backends is None:
            backends = ["postgres", "qdrant", "supermemory"]

        tasks = {}
        if "postgres" in backends:
            tasks["postgres"] = store_postgres(content, metadata)
        if "qdrant" in backends:
            tasks["qdrant"] = store_qdrant(content, metadata)
        if "supermemory" in backends:
            tasks["supermemory"] = store_supermemory(content, metadata)
        if "redis" in backends:
            # Derive a stable key from the content when none was supplied.
            key = metadata.get("key", hashlib.md5(content.encode()).hexdigest()[:16])
            ttl = int(metadata.get("ttl", 3600))

            # NOTE: asyncio.coroutine was removed in Python 3.11, so the old
            # asyncio.coroutine(lambda: ...)() wrapper would raise
            # AttributeError here. Wrap the synchronous Redis call in a plain
            # coroutine instead.
            async def _redis_store() -> Dict[str, Any]:
                return {"redis": store_redis(key, content, ttl)}

            tasks["redis"] = _redis_store()

        if not tasks:
            return {}

        # Run all backend writes concurrently; capture exceptions per backend
        # instead of letting one failure cancel the rest.
        results_list = await asyncio.gather(*tasks.values(), return_exceptions=True)
        results: Dict[str, Any] = {}
        for k, r in zip(tasks.keys(), results_list):
            if isinstance(r, Exception):
                results[k] = {"error": str(r), "stored": False}
            else:
                results[k] = r

        return results

    async def search(
        self,
        query: str,
        limit: int = 10,
        systems: Optional[List[str]] = None,
    ) -> List[MemoryResult]:
        """
        Semantic search across all memory systems.

        Args:
            query:   Natural language query
            limit:   Maximum results to return
            systems: Specific systems to search. Default: all.

        Returns:
            List of MemoryResult objects sorted by relevance.
        """
        return await query_all(query, max_results=limit, systems=systems)

    def recall(self, key: str) -> Optional[str]:
        """
        Fast Redis recall by key.

        Args:
            key: Lookup key (will be prefixed with 'gmh:')

        Returns:
            String value if found, None if not found or Redis unavailable.
        """
        return recall_redis(key)

    def remember(self, key: str, value: str, ttl_seconds: int = 3600) -> bool:
        """
        Store a key/value pair in Redis working memory.

        Args:
            key:         Lookup key
            value:       String value to store
            ttl_seconds: Time-to-live in seconds (default: 1 hour)

        Returns:
            True on success, False on failure.
        """
        return store_redis(key, value, ttl_seconds)

    async def forget(self, doc_id: str) -> Dict[str, Any]:
        """
        Remove a memory from all backends by ID.

        Args:
            doc_id: The document/point/row ID to delete

        Returns:
            Dict with deletion status per backend.
        """
        pg_task = delete_postgres(doc_id)
        qdrant_task = delete_qdrant(doc_id)

        # Async deletes run concurrently; exceptions are reported per backend.
        results_list = await asyncio.gather(pg_task, qdrant_task, return_exceptions=True)
        pg_result, qdrant_result = results_list

        # Redis delete is synchronous and best-effort.
        redis_deleted = delete_redis(doc_id)

        return {
            "postgres": pg_result if not isinstance(pg_result, Exception) else {"error": str(pg_result)},
            "qdrant": qdrant_result if not isinstance(qdrant_result, Exception) else {"error": str(qdrant_result)},
            "redis": {"deleted": redis_deleted},
        }

    async def health_check(self) -> Dict[str, Any]:
        """
        Check the health/connectivity of all 5 backends.

        Returns:
            Dict mapping backend name to True on success, or an
            "ERROR: <message>" string on failure.
        """
        status: Dict[str, Any] = {}

        # PostgreSQL: open a connection and run a trivial query.
        try:
            import psycopg2

            sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
            import importlib, elestio_config
            # Reload in case the config module changed since first import.
            importlib.reload(elestio_config)

            conn = psycopg2.connect(**elestio_config.PostgresConfig.get_connection_params())
            cur = conn.cursor()
            cur.execute("SELECT 1")
            conn.close()
            status["postgres"] = True
        except Exception as e:
            status["postgres"] = f"ERROR: {e}"

        # Qdrant: list collections as a connectivity probe.
        try:
            from qdrant_client import QdrantClient

            sys.path.insert(0, str(Path(GENESIS_ROOT) / "data" / "genesis-memory"))
            import elestio_config

            cfg = elestio_config.QdrantConfig()
            client = QdrantClient(url=cfg.url, api_key=cfg.api_key, timeout=5)
            client.get_collections()
            status["qdrant"] = True
        except Exception as e:
            status["qdrant"] = f"ERROR: {e}"

        # Redis: PING round-trip.
        try:
            r = _get_redis_client()
            r.ping()
            status["redis"] = True
        except Exception as e:
            status["redis"] = f"ERROR: {e}"

        # Supermemory: lightweight authenticated GET.
        try:
            async with httpx.AsyncClient(timeout=5) as client:
                resp = await client.get(
                    "https://api.supermemory.ai/v3/memories",
                    headers={"Authorization": f"Bearer {SUPERMEMORY_API_KEY}"},
                    params={"limit": 1},
                )
                status["supermemory"] = resp.status_code in (200, 204)
        except Exception as e:
            status["supermemory"] = f"ERROR: {e}"

        # File KG: directory existence is the only check we can do locally.
        try:
            kg_exists = KG_DIR.exists()
            status["file_kg"] = kg_exists
        except Exception as e:
            status["file_kg"] = f"ERROR: {e}"

        return status

    def __repr__(self) -> str:
        state = "connected" if self._connected else "not connected"
        return f"GenesisMemoryHub({state})"


# ---------------------------------------------------------------------------
# CLI entrypoint
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Genesis Memory Hub - Unified query across all memory systems"
    )
    parser.add_argument("query", nargs="?", help="Search query")
    parser.add_argument("--systems", nargs="+", help="Systems to search (supermemory graphiti file_kg qdrant postgres)")
    parser.add_argument("--max-results", type=int, default=10)
    parser.add_argument("--verbose", action="store_true")
    parser.add_argument("--health", action="store_true", help="Run health check")
    args = parser.parse_args()

    # Health mode: probe all backends and exit.
    if args.health:
        hub = GenesisMemoryHub()
        status = asyncio.run(hub.health_check())
        print("\n=== Genesis Memory Hub Health ===")
        for backend, ok in status.items():
            icon = "[OK]" if ok is True else "[FAIL]"
            print(f"  {icon} {backend}: {ok}")
        sys.exit(0)

    # FIX: the old fallback `args.query or " ".join(sys.argv[1:])` could only
    # ever re-inject option strings (argparse rejects extra positionals), so
    # running with e.g. just `--verbose` turned "--verbose" into the query.
    # If no positional query was given, show help and exit.
    query_text = args.query
    if not query_text:
        parser.print_help()
        sys.exit(1)

    start_time = time.time()
    results = query_sync(
        query_text,
        max_results=args.max_results,
        systems=args.systems,
        verbose=args.verbose,
    )
    total_time = time.time() - start_time

    print(f"\n=== Genesis Memory Hub: '{query_text}' ===")
    print(f"Found {len(results)} results in {total_time:.2f}s\n")

    # Partition results: backend errors vs. real hits (meta rows excluded).
    errors = []
    good = []
    for r in results:
        if r.content.startswith("[ERROR]") or r.content.startswith("[SKIP]"):
            errors.append(r)
        elif r.source != "_meta":
            good.append(r)

    for i, r in enumerate(good, 1):
        print(_format_result(r, i))
        print()

    if errors and args.verbose:
        print("\n--- System Errors ---")
        for e in errors:
            print(f"  {e.source}: {e.content}")

    if args.verbose:
        # Timing/diagnostic rows emitted by query_all under the "_meta" source.
        meta = [r for r in results if r.source == "_meta"]
        for m in meta:
            print(f"\n{m.content}")