#!/usr/bin/env python3
"""
Genesis RAG Query Engine
========================
Semantic search across the bloodstream knowledge base.

Embeds a query with gemini-embedding-001 (3072d), searches Qdrant
genesis_memories collection, optionally enriches with PostgreSQL metadata.

Usage:
    python3 -m core.rag_query "How does Telnyx assistant creation work?"
    python3 -m core.rag_query --top 10 "GHL pipeline setup"
    python3 -m core.rag_query --json "SubAIVA provisioning"
"""

import os
import sys
import json
import argparse
from typing import List, Dict, Any, Optional

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
QDRANT_URL = os.getenv(
    "QDRANT_URL", "https://qdrant-b3knu-u50607.vm.elestio.app:6333"
)
# NOTE(review): shipping a hard-coded API key as the fallback leaks the
# credential with the source — consider requiring the env var instead.
QDRANT_API_KEY = os.getenv(
    "QDRANT_API_KEY",
    "7b74e6621bd0e6650789f6662bca4cbf4143d3d1d710a0002b3b563973ca6876",
)
COLLECTION = "genesis_memories"       # Qdrant collection searched by this module
EMBED_MODEL = "gemini-embedding-001"  # model used to embed queries
VECTOR_DIM = 3072                     # dimensionality of the stored vectors

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
if not GEMINI_API_KEY:
    # Fallback: scan the shared secrets file for a GEMINI_API_KEY=... line.
    _secrets_file = "/mnt/e/genesis-system/config/secrets.env"
    if os.path.exists(_secrets_file):
        with open(_secrets_file) as handle:
            for raw_line in handle:
                if raw_line.startswith("GEMINI_API_KEY="):
                    # Keep everything after the first '=', minus whitespace/quotes.
                    GEMINI_API_KEY = raw_line.split("=", 1)[1].strip().strip("'\"")
                    break


# ---------------------------------------------------------------------------
# Embedding
# ---------------------------------------------------------------------------
_client = None  # module-wide cache for the google-genai client


def _get_genai_client():
    """Return the shared Gemini client, creating it on first call."""
    global _client
    if _client is not None:
        return _client
    # Deferred import: the dependency is only needed once a query is embedded.
    from google import genai as google_genai
    _client = google_genai.Client(api_key=GEMINI_API_KEY)
    return _client


def embed_query(text: str) -> List[float]:
    """Return the Gemini embedding vector for the query string *text*."""
    response = _get_genai_client().models.embed_content(
        model=EMBED_MODEL,
        contents=text,
    )
    # embed_content returns one embedding per input; we sent one, take the first.
    return list(response.embeddings[0].values)


# ---------------------------------------------------------------------------
# Qdrant Search
# ---------------------------------------------------------------------------
def search_qdrant(
    query_vector: List[float],
    top_k: int = 5,
    score_threshold: float = 0.3,
    type_filter: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Run a vector search against the genesis_memories Qdrant collection.

    Args:
        query_vector: Embedding to search with.
        top_k: Maximum number of points to return.
        score_threshold: Minimum similarity score a point must reach.
        type_filter: If given, restrict hits to this payload ``type``
            (compared uppercased).

    Returns:
        One plain dict per hit with normalized payload fields.
    """
    from qdrant_client import QdrantClient
    from qdrant_client.models import Filter, FieldCondition, MatchValue

    client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)

    # Optional server-side payload filter on the (uppercased) entry type.
    qfilter = None
    if type_filter:
        condition = FieldCondition(
            key="type", match=MatchValue(value=type_filter.upper())
        )
        qfilter = Filter(must=[condition])

    response = client.query_points(
        collection_name=COLLECTION,
        query=query_vector,
        limit=top_k,
        score_threshold=score_threshold,
        query_filter=qfilter,
        with_payload=True,
    )

    def _as_hit(point) -> Dict[str, Any]:
        # Normalize a Qdrant point into a flat dict; the fallbacks cover
        # payloads that use "source"/"content" instead of "source_url"/"text".
        data = point.payload or {}
        return {
            "id": str(point.id),
            "score": round(point.score, 4),
            "title": data.get("title", ""),
            "type": data.get("type", ""),
            "source": data.get("source_url", "") or data.get("source", ""),
            "content": data.get("text", "") or data.get("content", ""),
            "tags": data.get("tags", []),
            "platform": data.get("platform", ""),
            "confidence": data.get("confidence", 0),
        }

    return [_as_hit(point) for point in response.points]


# ---------------------------------------------------------------------------
# Combined RAG query
# ---------------------------------------------------------------------------
def rag_query(
    question: str,
    top_k: int = 5,
    score_threshold: float = 0.3,
    type_filter: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Embed *question* and retrieve the closest knowledge entries.

    Args:
        question: Natural language query
        top_k: Number of results to return
        score_threshold: Minimum similarity score (0-1)
        type_filter: Optional type filter (e.g., "PRODUCT_SPEC")

    Returns:
        List of matching knowledge entries with scores
    """
    return search_qdrant(
        query_vector=embed_query(question),
        top_k=top_k,
        score_threshold=score_threshold,
        type_filter=type_filter,
    )


def rag_context(question: str, top_k: int = 5) -> str:
    """Return matching bloodstream knowledge as one LLM-ready context string.

    This is the entry point agents should use to fetch relevant knowledge
    before generating a response.
    """
    matches = rag_query(question, top_k=top_k)
    if not matches:
        return "[No relevant knowledge found in bloodstream]"

    parts = [f"=== BLOODSTREAM KNOWLEDGE ({len(matches)} matches) ===\n"]
    for idx, hit in enumerate(matches, 1):
        # One numbered entry per match; content is capped at 500 chars.
        parts.extend(
            [
                f"[{idx}] {hit['title']} (score: {hit['score']}, type: {hit['type']})",
                f"    Source: {hit['source']}",
                f"    {hit['content'][:500]}",
                "",
            ]
        )
    return "\n".join(parts)


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def main():
    parser = argparse.ArgumentParser(description="Genesis RAG Query Engine")
    parser.add_argument("query", help="Search query")
    parser.add_argument("--top", type=int, default=5, help="Number of results")
    parser.add_argument("--threshold", type=float, default=0.3, help="Min score")
    parser.add_argument("--type", dest="type_filter", help="Filter by type")
    parser.add_argument("--json", action="store_true", help="JSON output")
    parser.add_argument("--context", action="store_true", help="LLM context format")
    args = parser.parse_args()

    if args.context:
        print(rag_context(args.query, top_k=args.top))
        return

    results = rag_query(
        args.query,
        top_k=args.top,
        score_threshold=args.threshold,
        type_filter=args.type_filter,
    )

    if args.json:
        print(json.dumps(results, indent=2, default=str))
        return

    if not results:
        print("No results found.")
        return

    print(f"\n{'='*70}")
    print(f"  RAG QUERY: {args.query}")
    print(f"  Results: {len(results)} | Collection: {COLLECTION} | Dim: {VECTOR_DIM}")
    print(f"{'='*70}\n")

    for i, r in enumerate(results, 1):
        print(f"  [{i}] {r['title']}")
        print(f"      Score: {r['score']}  |  Type: {r['type']}  |  Source: {r['source']}")
        content = r["content"][:200]
        print(f"      {content}...")
        if r["tags"]:
            print(f"      Tags: {', '.join(r['tags'][:5])}")
        print()


if __name__ == "__main__":
    main()
