"""
kg_query_before_execution.py
-----------------------------
Auto-queries the Knowledge Graph before every Genesis task starts, and injects
the top N relevant axioms into the agent system prompt.

Usage (CLI):
    python3 core/kg_query_before_execution.py --query "build voice bridge" --top 5
    python3 core/kg_query_before_execution.py --query "build voice bridge" --top 5 --inject

Programmatic:
    from core.kg_query_before_execution import kg_preflight, inject_axioms_into_prompt
"""

import os
import json
import argparse
import pathlib
from datetime import datetime, timezone
from typing import Optional

# ---------------------------------------------------------------------------
# Paths
# ---------------------------------------------------------------------------
_GENESIS_ROOT = pathlib.Path("/mnt/e/genesis-system")
_AXIOMS_DIR   = _GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "axioms"
_ENTITIES_DIR = _GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "entities"

# ---------------------------------------------------------------------------
# Qdrant / Gemini config
# ---------------------------------------------------------------------------
QDRANT_HOST       = "qdrant-b3knu-u50607.vm.elestio.app"
QDRANT_PORT       = 6333
QDRANT_COLLECTION = "genesis_memories"

# Available model confirmed via client.models.list() — supports embedContent
EMBED_MODEL = "models/gemini-embedding-001"


# ---------------------------------------------------------------------------
# Helper: load environment (support secrets.env file as fallback)
# ---------------------------------------------------------------------------
def _load_env() -> None:
    """Populate os.environ from config/secrets.env, never overriding set vars.

    Lines are KEY=VALUE; blanks, comments, and lines without '=' are skipped.
    Surrounding single/double quotes on the value are stripped.
    """
    secrets_file = _GENESIS_ROOT / "config" / "secrets.env"
    if not secrets_file.exists():
        return
    with open(secrets_file) as fh:
        for raw_line in fh:
            entry = raw_line.strip()
            if not entry or entry.startswith("#") or "=" not in entry:
                continue
            name, _, value = entry.partition("=")
            name = name.strip()
            value = value.strip().strip('"').strip("'")
            # Existing environment always wins over the secrets file.
            if name and name not in os.environ:
                os.environ[name] = value


def _gemini_api_key() -> Optional[str]:
    """Return the first configured Gemini API key, or None if none is set.

    Checked in priority order: GEMINI_API_KEY_NEW, GEMINI_API_KEY, GOOGLE_API_KEY.
    """
    _load_env()
    candidates = ("GEMINI_API_KEY_NEW", "GEMINI_API_KEY", "GOOGLE_API_KEY")
    return next(
        (os.environ[name] for name in candidates if os.environ.get(name)),
        None,
    )


def _safe_float(val, default: float = 0.0) -> float:
    """Convert val to float safely, mapping string confidence labels."""
    if isinstance(val, (int, float)):
        return float(val)
    if isinstance(val, str):
        mapping = {
            "very_high": 0.95, "high": 0.9, "medium": 0.7,
            "low": 0.5, "very_low": 0.3,
        }
        lowered = val.strip().lower()
        if lowered in mapping:
            return mapping[lowered]
        try:
            return float(val)
        except (ValueError, TypeError):
            return default
    return default


# ---------------------------------------------------------------------------
# Embedding via Gemini
# ---------------------------------------------------------------------------
def _embed_text(text: str) -> Optional[list]:
    """Embed *text* via the Gemini embedding model; None if no key or on error."""
    api_key = _gemini_api_key()
    if not api_key:
        return None
    try:
        from google import genai as new_genai

        client = new_genai.Client(api_key=api_key)
        result = client.models.embed_content(
            model=EMBED_MODEL,
            contents=[text],
        )
        # EmbedContentResponse -> .embeddings[0] (ContentEmbedding) -> .values
        return list(result.embeddings[0].values)
    except Exception as exc:  # SDK/network failures are non-fatal for callers
        print(f"[kg_query] Gemini embed error: {exc}")
        return None


# ---------------------------------------------------------------------------
# Qdrant query  (qdrant-client >= 1.7, uses query_points)
# ---------------------------------------------------------------------------
def _query_qdrant(vector: list, top_n: int) -> list:
    """Query the Qdrant collection with *vector* and return scored axiom dicts.

    Args:
        vector: Embedding vector for the task description.
        top_n:  Maximum number of hits to return.

    Returns:
        List of dicts {content, source, confidence, relevance_score, id}.
        Returns [] on any failure (missing client, missing key, network/auth
        errors) so callers can fall back to the JSONL scan.
    """
    try:
        from qdrant_client import QdrantClient

        # SECURITY FIX: the API key previously had a hard-coded fallback value
        # committed in source. Credentials must come from the environment
        # (secrets.env via _load_env); without a key we skip Qdrant entirely.
        _load_env()
        api_key = os.environ.get("QDRANT_API_KEY")
        if not api_key:
            print("[kg_query] QDRANT_API_KEY not set — skipping Qdrant query.")
            return []

        client = QdrantClient(
            url=f"https://{QDRANT_HOST}:{QDRANT_PORT}",
            api_key=api_key,
            timeout=8.0,
            prefer_grpc=False,
        )
        # query_points is the current API (qdrant-client >= 1.7)
        response = client.query_points(
            collection_name=QDRANT_COLLECTION,
            query=vector,
            limit=top_n,
            with_payload=True,
        )
        # response.points is a list of ScoredPoint
        points = response.points if hasattr(response, "points") else response
        axioms = []
        for hit in points:
            payload = hit.payload or {}
            content = payload.get("content") or payload.get("axiom") or ""
            axioms.append({
                "content":         content,
                "source":          payload.get("source", "unknown"),
                "confidence":      _safe_float(payload.get("confidence", 0.0)),
                "relevance_score": float(hit.score),
                "id":              payload.get("id", str(hit.id)),
            })
        return axioms
    except Exception as exc:
        print(f"[kg_query] Qdrant query failed: {exc}")
        return []


# ---------------------------------------------------------------------------
# JSONL fallback — keyword similarity
# ---------------------------------------------------------------------------
def _keyword_score(text: str, query: str) -> float:
    """Simple keyword overlap score (0-1) for fallback ranking."""
    query_tokens = set(query.lower().split())
    text_tokens  = set(text.lower().split())
    if not query_tokens:
        return 0.0
    return len(query_tokens & text_tokens) / len(query_tokens)


def _fallback_jsonl_query(task_description: str, top_n: int) -> list:
    """Rank axioms from local JSONL files by keyword overlap; return top_n.

    Each record needs a 'content' (or 'axiom') field; malformed JSON lines are
    skipped. Results are sorted by relevance, then confidence, descending.
    """
    if not _AXIOMS_DIR.exists():
        return []

    scored: list = []
    for path in sorted(_AXIOMS_DIR.glob("*.jsonl")):
        try:
            with open(path, encoding="utf-8", errors="replace") as fh:
                for raw in fh:
                    entry = raw.strip()
                    if not entry:
                        continue
                    try:
                        record = json.loads(entry)
                    except json.JSONDecodeError:
                        continue  # tolerate corrupt lines
                    content = record.get("content") or record.get("axiom") or ""
                    if not content:
                        continue
                    scored.append({
                        "content":         content,
                        "source":          record.get("source", path.stem),
                        "confidence":      _safe_float(record.get("confidence", 0.0)),
                        "relevance_score": _keyword_score(content, task_description),
                        "id":              record.get("id", ""),
                    })
        except Exception as exc:
            print(f"[kg_query] Error reading {path.name}: {exc}")
            continue

    scored.sort(key=lambda rec: (rec["relevance_score"], rec["confidence"]),
                reverse=True)
    return scored[:top_n]


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
def get_relevant_axioms(task_description: str, top_n: int = 3) -> list:
    """
    Query the Knowledge Graph for axioms semantically relevant to task_description.

    Tries, in order: the unified Nervous System retriever, the legacy
    Gemini-embedding + Qdrant pipeline, and finally a keyword scan of the
    local JSONL axiom files.

    Returns:
        List of dicts: {content, source, confidence, relevance_score}
    """
    # Preferred path: unified retriever (Qdrant + PG + KG files).
    try:
        from core.nervous_system.runtime_query import query_knowledge_structured

        hits = query_knowledge_structured(task_description, top_k=top_n)
        if hits:
            return [
                {
                    "content": hit.content,
                    "source": hit.source,
                    "confidence": hit.relevance_score,
                    "relevance_score": hit.relevance_score,
                }
                for hit in hits
            ]
    except Exception as exc:
        print(f"[kg_query] Unified retriever failed ({exc}) — falling back to legacy.")

    # Legacy path: embed the task, then vector-search Qdrant.
    vector = _embed_text(task_description)
    if not vector:
        print("[kg_query] Embedding unavailable — falling back to JSONL scan.")
        return _fallback_jsonl_query(task_description, top_n)

    qdrant_hits = _query_qdrant(vector, top_n)
    if qdrant_hits:
        return qdrant_hits

    print("[kg_query] Qdrant returned 0 results — falling back to JSONL scan.")
    return _fallback_jsonl_query(task_description, top_n)


def inject_axioms_into_prompt(base_prompt: str, axioms: list) -> str:
    """
    Prepend a numbered axiom block to base_prompt; no-op when axioms is empty.

    Output format:
        ## Relevant Genesis Axioms (auto-injected)
        1. [content] (source: X, confidence: Y)
        ...

        ## Task
        [base_prompt]
    """
    if not axioms:
        return base_prompt

    header = ["## Relevant Genesis Axioms (auto-injected)"]
    numbered = [
        f"{rank}. {ax.get('content', '')} "
        f"(source: {ax.get('source', 'unknown')}, "
        f"confidence: {ax.get('confidence', 0.0)})"
        for rank, ax in enumerate(axioms, start=1)
    ]
    return "\n".join(header + numbered + ["", "## Task", base_prompt])


def kg_preflight(task_description: str) -> dict:
    """
    Full KG preflight for a task.

    Returns:
        {
            axioms:        list[dict],  # top 3 relevant axioms
            entity_count:  int,         # total entity records across KG files
            axiom_count:   int,         # total axiom records across JSONL files
            timestamp:     str,         # ISO-8601 UTC
        }
    """
    def _count_nonblank_lines(path: pathlib.Path) -> int:
        # Best-effort record count; unreadable files contribute nothing.
        try:
            with open(path, encoding="utf-8", errors="replace") as fh:
                return sum(1 for line in fh if line.strip())
        except Exception:
            return 0

    relevant = get_relevant_axioms(task_description, top_n=3)

    axiom_total = 0
    if _AXIOMS_DIR.exists():
        for path in _AXIOMS_DIR.glob("*.jsonl"):
            axiom_total += _count_nonblank_lines(path)

    entity_total = 0
    if _ENTITIES_DIR.exists():
        entity_files = list(_ENTITIES_DIR.glob("*.jsonl")) + list(_ENTITIES_DIR.glob("*.json"))
        for path in entity_files:
            try:
                text = path.read_text(encoding="utf-8", errors="replace").strip()
            except Exception:
                continue
            if not text:
                continue
            try:
                # A JSON array counts one record per element; any other JSON
                # document counts as a single record.
                doc = json.loads(text)
                entity_total += len(doc) if isinstance(doc, list) else 1
            except json.JSONDecodeError:
                # Not a single JSON document — treat as JSONL, one per line.
                entity_total += sum(1 for line in text.splitlines() if line.strip())

    root_entities = _GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "entities.jsonl"
    if root_entities.exists():
        entity_total += _count_nonblank_lines(root_entities)

    return {
        "axioms":        relevant,
        "entity_count":  entity_total,
        "axiom_count":   axiom_total,
        "timestamp":     datetime.now(timezone.utc).isoformat(),
    }


# ---------------------------------------------------------------------------
# CLI entry-point
# ---------------------------------------------------------------------------
def _cli() -> None:
    """Parse CLI args, query the KG, and pretty-print the ranked axioms."""
    parser = argparse.ArgumentParser(
        description="Query Genesis Knowledge Graph for relevant axioms.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python3 core/kg_query_before_execution.py --query "voice bridge MCP server" --top 3
  python3 core/kg_query_before_execution.py --query "build landing page" --top 5 --inject
""",
    )
    parser.add_argument("--query", "-q", required=True,
                        help="Task description to search for relevant axioms.")
    parser.add_argument("--top", "-n", type=int, default=3,
                        help="Number of top axioms to return (default: 3).")
    parser.add_argument("--inject", action="store_true",
                        help="Print example of axioms injected into a sample prompt.")
    args = parser.parse_args()

    print(f"\n[kg_query] Querying KG for: '{args.query}'  (top {args.top})\n")

    # kg_preflight is hard-wired to top-3, so the full stats pass only runs
    # when the requested top matches; otherwise do a plain query.
    if args.top == 3:
        result = kg_preflight(args.query)
        print(f"KG Stats  — axioms: {result['axiom_count']}  |  entities: {result['entity_count']}")
        print(f"Timestamp — {result['timestamp']}\n")
    else:
        result = {
            "axioms":    get_relevant_axioms(args.query, top_n=args.top),
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }

    ranked = result["axioms"]
    if not ranked:
        print("No axioms found.")
        return

    print(f"Top {len(ranked)} axiom(s) found:\n")
    for rank, ax in enumerate(ranked, start=1):
        print(f"  {rank}. {ax['content']}")
        print(f"     source: {ax['source']}  |  confidence: {ax['confidence']}  |  relevance: {ax['relevance_score']:.4f}")
        print()

    if args.inject:
        injected = inject_axioms_into_prompt("Build the voice bridge module.", ranked)
        print("--- Injected prompt preview ---")
        print(injected)
        print("--- end ---")


if __name__ == "__main__":
    _cli()
