#!/usr/bin/env python3
"""
YouTube Knowledge Pipeline - Qdrant Vector Ingestion Module (STORY-007)

Ingests YouTube transcripts and extracted entities into Qdrant vector database
for semantic search and similarity matching.

Features:
- Semantic chunking with overlap for context preservation
- Gemini text-embedding-004 embeddings (768 dimensions)
- Entity-level embeddings for knowledge graph
- Video similarity search
- Full-text semantic search

Qdrant Collection: youtube_knowledge
- vectors: 768 dimensions (Gemini embedding)
- payload: video_id, title, channel, chunk_text, chunk_index, timestamp, entity_type

Usage:
    from core.youtube.qdrant_ingestion import QdrantVideoIngester

    ingester = QdrantVideoIngester()

    # Ingest a transcript
    result = ingester.ingest_transcript(
        video_id="abc123",
        text="Full transcript text...",
        metadata={"title": "Video Title", "channel": "Channel Name"}
    )

    # Search
    results = ingester.search("voice AI implementation", limit=10)

    # Find similar videos
    similar = ingester.get_similar_videos("abc123", limit=5)

Author: Genesis System
Version: 1.0.0
STORY-007: Qdrant Vector Ingestion for YouTube Pipeline
"""

import os
import sys
import json
import hashlib
import logging
import time
import re
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass, field, asdict
import uuid

# Add genesis-system to path
sys.path.insert(0, '/mnt/e/genesis-system')

# Configure root logging once at import time; importing this module as a
# library therefore installs a basicConfig handler as a side effect.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Paths
GENESIS_ROOT = Path("/mnt/e/genesis-system")  # hard-coded deployment root (WSL mount)
SECRETS_PATH = GENESIS_ROOT / "config" / "secrets.env"  # KEY=VALUE secrets file


def load_secrets() -> Dict[str, str]:
    """Load KEY=VALUE pairs from the secrets.env file.

    Blank lines, '#' comments, and lines without '=' are skipped. Keys and
    values are stripped of surrounding whitespace (so ``KEY = value`` yields
    the key ``"KEY"``, not ``"KEY "``), an optional leading ``export `` is
    removed, and matching single or double quotes around the value are
    dropped so dotenv-style ``KEY="value"`` entries parse to the bare value.

    Returns:
        Mapping of secret name to value; empty if the file does not exist.
    """
    secrets: Dict[str, str] = {}
    if SECRETS_PATH.exists():
        with open(SECRETS_PATH) as f:
            for raw in f:
                line = raw.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                # Tolerate shell-style "export KEY=VALUE" lines.
                if line.startswith('export '):
                    line = line[len('export '):]
                key, value = line.split('=', 1)
                key = key.strip()
                value = value.strip()
                # Strip matching surrounding quotes (dotenv convention).
                if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                    value = value[1:-1]
                if key:
                    secrets[key] = value
    return secrets


# Load API keys
# Environment variables take precedence over values parsed from secrets.env.
_secrets = load_secrets()
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY") or _secrets.get("GEMINI_API_KEY", "")
QDRANT_HOST = os.environ.get("GENESIS_QDRANT_HOST") or _secrets.get("GENESIS_QDRANT_HOST", "qdrant-b3knu-u50607.vm.elestio.app")
QDRANT_PORT = int(os.environ.get("GENESIS_QDRANT_PORT") or _secrets.get("GENESIS_QDRANT_PORT", "6333"))
# SECURITY NOTE(review): a live-looking API key is committed as the fallback
# default below. It should be removed from source and rotated; secrets belong
# only in the environment or in secrets.env.
QDRANT_API_KEY = os.environ.get("GENESIS_QDRANT_API_KEY") or _secrets.get("GENESIS_QDRANT_API_KEY", "7b74e6621bd0e6650789f6662bca4cbf4143d3d1d710a0002b3b563973ca6876")


@dataclass
class ChunkMetadata:
    """Metadata for a transcript chunk.

    NOTE(review): not referenced elsewhere in this module's visible code;
    appears to document the payload schema used by ingest_transcript.
    """
    video_id: str          # YouTube video ID the chunk came from
    chunk_index: int       # 0-based position of this chunk in the transcript
    start_time: float      # approximate chunk start timestamp (seconds)
    chunk_text: str        # the chunk's text content
    title: str = ""        # video title (may be empty)
    channel: str = ""      # channel name (may be empty)
    language: str = "en"   # transcript language code
    total_chunks: int = 0  # total number of chunks generated for the video


@dataclass
class IngestionResult:
    """Result from ingestion operation."""
    success: bool                  # True when the Qdrant upsert completed
    video_id: str                  # video the operation targeted
    chunks_stored: int = 0         # transcript chunks written (ingest_transcript)
    entities_stored: int = 0       # entity points written (ingest_entities)
    error: Optional[str] = None    # error message when success is False
    ingestion_time_ms: int = 0     # wall-clock duration of the operation


@dataclass
class SearchResult:
    """Result from semantic search."""
    id: str                        # Qdrant point ID
    video_id: str                  # source YouTube video ID
    text: str                      # chunk text, or "name: description" for entity hits
    score: float                   # similarity score reported by Qdrant
    title: str = ""                # video title (empty for entity hits)
    channel: str = ""              # channel name
    chunk_index: int = 0           # chunk position within the transcript
    timestamp: float = 0.0         # chunk start time within the video
    entity_type: Optional[str] = None  # set only for entity results
    payload: Dict[str, Any] = field(default_factory=dict)  # full raw Qdrant payload


class GeminiEmbedder:
    """
    Gemini embedding generator using text-embedding-004.

    Produces 768-dimensional embeddings optimized for semantic similarity.
    Uses the google-generativeai SDK when installed; otherwise falls back
    to the raw REST endpoint via urllib.
    """

    MODEL = "text-embedding-004"
    DIMENSIONS = 768
    MAX_TOKENS = 2048  # Max input tokens for embedding

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the embedder.

        Args:
            api_key: Gemini API key; falls back to module-level GEMINI_API_KEY.

        Raises:
            ValueError: If no API key is available from any source.
        """
        self.api_key = api_key or GEMINI_API_KEY
        if not self.api_key:
            raise ValueError("GEMINI_API_KEY not configured")

        # Try to use google-generativeai SDK
        self.client = None
        try:
            import google.generativeai as genai
            genai.configure(api_key=self.api_key)
            self.client = genai
            logger.info("Initialized Gemini embedder with google-generativeai SDK")
        except ImportError:
            logger.warning("google-generativeai not installed, using REST API fallback")

    def embed(self, text: str) -> List[float]:
        """Generate embedding for a single text.

        Empty/whitespace-only input yields a zero vector rather than an
        API call. Raises on API failure (both SDK and REST paths re-raise).
        """
        if not text or not text.strip():
            # Return zero vector for empty text
            return [0.0] * self.DIMENSIONS

        # Truncate if too long.
        # NOTE(review): ~4 chars/token is a heuristic; long inputs may still
        # exceed the model's true token limit.
        if len(text) > self.MAX_TOKENS * 4:  # Rough char estimate
            text = text[:self.MAX_TOKENS * 4]

        if self.client:
            return self._embed_sdk(text)
        else:
            return self._embed_rest(text)

    def embed_batch(self, texts: List[str], batch_size: int = 100) -> List[List[float]]:
        """Generate embeddings for multiple texts.

        Processes in batches of `batch_size`; the REST fallback embeds one
        text at a time. Returns embeddings in the same order as `texts`.
        """
        embeddings = []

        for i in range(0, len(texts), batch_size):
            batch = texts[i:i + batch_size]

            if self.client:
                batch_embeddings = self._embed_batch_sdk(batch)
            else:
                batch_embeddings = [self._embed_rest(t) for t in batch]

            embeddings.extend(batch_embeddings)

            # Rate limiting between batches
            if i + batch_size < len(texts):
                time.sleep(0.1)

        return embeddings

    def _embed_sdk(self, text: str) -> List[float]:
        """Generate embedding using SDK. Logs and re-raises on failure."""
        try:
            result = self.client.embed_content(
                model=f"models/{self.MODEL}",
                content=text,
                task_type="retrieval_document"
            )
            return result['embedding']
        except Exception as e:
            logger.error(f"SDK embedding failed: {e}")
            raise

    def _embed_batch_sdk(self, texts: List[str]) -> List[List[float]]:
        """Generate batch embeddings using SDK.

        Falls back to per-text SDK calls if the batch request fails, and to
        zero vectors if the response shape is unrecognized.
        """
        try:
            result = self.client.embed_content(
                model=f"models/{self.MODEL}",
                content=texts,
                task_type="retrieval_document"
            )
            # Handle both single and batch responses
            if isinstance(result.get('embedding'), list) and len(result['embedding']) > 0:
                if isinstance(result['embedding'][0], list):
                    return result['embedding']
                else:
                    # Single-vector response: wrap it so callers always get
                    # a list of vectors.
                    return [result['embedding']]
            return [[0.0] * self.DIMENSIONS for _ in texts]
        except Exception as e:
            logger.error(f"SDK batch embedding failed: {e}")
            # Fall back to individual embedding
            return [self._embed_sdk(t) for t in texts]

    def _embed_rest(self, text: str) -> List[float]:
        """Generate embedding using REST API.

        Sends the API key via the x-goog-api-key header; 30s timeout.
        Logs and re-raises on failure.
        """
        import urllib.request
        import urllib.error

        url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.MODEL}:embedContent"

        payload = {
            "model": f"models/{self.MODEL}",
            "content": {
                "parts": [{"text": text}]
            },
            "taskType": "RETRIEVAL_DOCUMENT"
        }

        headers = {
            "Content-Type": "application/json",
            "x-goog-api-key": self.api_key
        }

        try:
            req = urllib.request.Request(
                url,
                data=json.dumps(payload).encode(),
                headers=headers,
                method="POST"
            )

            with urllib.request.urlopen(req, timeout=30) as response:
                result = json.loads(response.read().decode())
                return result.get("embedding", {}).get("values", [0.0] * self.DIMENSIONS)

        except Exception as e:
            logger.error(f"REST embedding failed: {e}")
            raise


class TranscriptChunker:
    """
    Semantic chunker for YouTube transcripts.

    Splits transcripts into overlapping chunks for better context
    preservation during retrieval. Token counts are estimated at
    roughly 4 characters per token throughout.
    """

    def __init__(
        self,
        target_tokens: int = 400,
        overlap_tokens: int = 50,
        min_chunk_tokens: int = 100
    ):
        """
        Initialize chunker.

        Args:
            target_tokens: Target chunk size (300-500 recommended)
            overlap_tokens: Token overlap between chunks
            min_chunk_tokens: Minimum tokens per chunk
        """
        self.target_tokens = target_tokens
        self.overlap_tokens = overlap_tokens
        self.min_chunk_tokens = min_chunk_tokens

    def _estimate_tokens(self, text: str) -> int:
        """Rough token count estimation (4 chars per token)."""
        return len(text) // 4

    def _find_sentence_boundary(self, text: str, target_pos: int) -> int:
        """Return the position just after the sentence ending nearest target_pos.

        Scans +/-100 characters around target_pos for '. ', '! ', '? ' (and
        their newline variants); falls back to target_pos when none is found.
        """
        sentence_endings = ['. ', '! ', '? ', '.\n', '!\n', '?\n']

        search_start = max(0, target_pos - 100)
        search_end = min(len(text), target_pos + 100)
        search_region = text[search_start:search_end]

        best_pos = -1
        best_dist = float('inf')

        for ending in sentence_endings:
            idx = 0
            while True:
                pos = search_region.find(ending, idx)
                if pos == -1:
                    break

                # Absolute position just past the ending punctuation.
                abs_pos = search_start + pos + len(ending)
                dist = abs(abs_pos - target_pos)

                if dist < best_dist:
                    best_dist = dist
                    best_pos = abs_pos

                idx = pos + 1

        return best_pos if best_pos != -1 else target_pos

    def chunk(
        self,
        text: str,
        segments: Optional[List[Dict[str, Any]]] = None
    ) -> List[Tuple[str, float]]:
        """
        Chunk transcript into semantic paragraphs.

        Args:
            text: Full transcript text
            segments: Optional list of timed segments [{"text": str, "start": float}]

        Returns:
            List of (chunk_text, start_time) tuples. When `segments` is given,
            start times are real segment timestamps; otherwise they are a
            rough positional estimate.
        """
        if not text or not text.strip():
            return []

        if segments:
            # Use segment timing for more accurate timestamps.
            return self._chunk_with_segments(segments)
        # Fall back to character-based chunking of the plain text.
        return self._chunk_plain_text(text)

    def _chunk_plain_text(self, text: str) -> List[Tuple[str, float]]:
        """Chunk plain text without timing info.

        Timestamps are a rough estimate: percentage position in the text
        (0-100), not seconds.
        """
        chunks: List[Tuple[str, float]] = []

        # Normalize whitespace: collapse runs of newlines.
        text = text.strip()
        text = re.sub(r'\n+', '\n', text)

        # Convert token targets to approximate character counts.
        target_chars = self.target_tokens * 4
        overlap_chars = self.overlap_tokens * 4

        pos = 0
        while pos < len(text):
            end_target = pos + target_chars

            if end_target >= len(text):
                # Final chunk: keep it if big enough, otherwise merge into
                # the previous chunk so no text is dropped.
                tail = text[pos:].strip()
                if self._estimate_tokens(tail) >= self.min_chunk_tokens:
                    estimated_time = (pos / len(text)) * 100  # Rough estimate
                    chunks.append((tail, estimated_time))
                elif chunks:
                    prev_text, prev_time = chunks[-1]
                    chunks[-1] = (prev_text + " " + tail, prev_time)
                break

            # Prefer to cut at a nearby sentence boundary.
            end_pos = self._find_sentence_boundary(text, end_target)

            piece = text[pos:end_pos].strip()
            if piece:
                estimated_time = (pos / len(text)) * 100
                chunks.append((piece, estimated_time))

            # Advance with overlap. BUGFIX: the guard guarantees forward
            # progress; previously, when overlap_chars >= end_pos - pos
            # (e.g. small target_tokens with the default overlap), pos could
            # stall or move backwards, looping forever.
            pos = max(end_pos - overlap_chars, pos + 1)

        return chunks

    def _chunk_with_segments(
        self,
        segments: List[Dict[str, Any]]
    ) -> List[Tuple[str, float]]:
        """Chunk using timed segments for accurate timestamps.

        Accumulates segments until the target token budget is reached, then
        starts the next chunk seeded with trailing segments worth up to
        overlap_tokens for context continuity.
        """
        chunks: List[Tuple[str, float]] = []

        current_chunk: List[str] = []
        current_tokens = 0
        chunk_start_time = 0.0

        for seg in segments:
            seg_text = seg.get("text", "").strip()
            seg_start = seg.get("start", 0.0)
            seg_tokens = self._estimate_tokens(seg_text)

            if not current_chunk:
                chunk_start_time = seg_start

            if current_tokens + seg_tokens > self.target_tokens and current_tokens >= self.min_chunk_tokens:
                # Emit the accumulated chunk.
                chunks.append((" ".join(current_chunk), chunk_start_time))

                # Seed the next chunk with trailing segments as overlap.
                overlap_segments: List[str] = []
                overlap_tokens = 0
                for past_seg in reversed(current_chunk):
                    past_tokens = self._estimate_tokens(past_seg)
                    if overlap_tokens + past_tokens > self.overlap_tokens:
                        break
                    overlap_segments.insert(0, past_seg)
                    overlap_tokens += past_tokens

                current_chunk = overlap_segments
                current_tokens = overlap_tokens
                chunk_start_time = seg_start

            current_chunk.append(seg_text)
            current_tokens += seg_tokens

        # Flush the final partial chunk; merge into the previous one if it
        # is below the minimum size.
        if current_chunk:
            chunk_text = " ".join(current_chunk)
            if self._estimate_tokens(chunk_text) >= self.min_chunk_tokens:
                chunks.append((chunk_text, chunk_start_time))
            elif chunks:
                prev_text, prev_time = chunks[-1]
                chunks[-1] = (prev_text + " " + chunk_text, prev_time)

        return chunks


class QdrantVideoIngester:
    """
    Qdrant vector ingestion for YouTube knowledge pipeline.

    Connects to Elestio Qdrant and stores transcript chunks and
    entity embeddings for semantic search.

    Collection: youtube_knowledge
    - Vector dimensions: 768 (Gemini text-embedding-004)
    - Distance: Cosine similarity
    """

    COLLECTION_NAME = "youtube_knowledge"
    VECTOR_SIZE = 768  # must match GeminiEmbedder.DIMENSIONS

    def __init__(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        api_key: Optional[str] = None,
        gemini_api_key: Optional[str] = None
    ):
        """
        Initialize the ingester.

        Connects to Qdrant immediately and creates the collection if needed,
        so construction can raise on network/import failure.

        Args:
            host: Qdrant host (defaults to Elestio)
            port: Qdrant port
            api_key: Qdrant API key
            gemini_api_key: Gemini API key for embeddings
        """
        self.host = host or QDRANT_HOST
        self.port = port or QDRANT_PORT
        self.api_key = api_key or QDRANT_API_KEY

        # Initialize Qdrant client
        self.client = None
        self._init_qdrant()

        # Initialize embedder
        self.embedder = GeminiEmbedder(api_key=gemini_api_key)

        # Initialize chunker
        self.chunker = TranscriptChunker(
            target_tokens=400,
            overlap_tokens=50
        )

        logger.info(f"QdrantVideoIngester initialized: {self.host}:{self.port}")

    def _init_qdrant(self):
        """Initialize Qdrant client and ensure collection exists.

        Always connects over HTTPS. Raises on missing qdrant-client or
        connection failure.
        """
        try:
            from qdrant_client import QdrantClient
            from qdrant_client.http import models
            from qdrant_client.http.models import Distance, VectorParams

            # Build HTTPS URL
            url = f"https://{self.host}:{self.port}"

            self.client = QdrantClient(
                url=url,
                api_key=self.api_key,
                timeout=60
            )

            # Check/create collection
            self._ensure_collection()

        except ImportError:
            logger.error("qdrant-client not installed. Run: pip install qdrant-client")
            raise
        except Exception as e:
            logger.error(f"Failed to initialize Qdrant: {e}")
            raise

    def _ensure_collection(self):
        """Ensure the youtube_knowledge collection exists.

        Idempotent: checks for the collection by name and only creates it
        when missing. Raises on any Qdrant error.
        """
        from qdrant_client.http.models import Distance, VectorParams
        from qdrant_client.http import models

        try:
            collections = self.client.get_collections().collections
            existing = {c.name for c in collections}

            if self.COLLECTION_NAME not in existing:
                logger.info(f"Creating collection: {self.COLLECTION_NAME}")

                self.client.create_collection(
                    collection_name=self.COLLECTION_NAME,
                    vectors_config=VectorParams(
                        size=self.VECTOR_SIZE,
                        distance=Distance.COSINE
                    ),
                    hnsw_config=models.HnswConfigDiff(
                        m=16,
                        ef_construct=100
                    ),
                    optimizers_config=models.OptimizersConfigDiff(
                        indexing_threshold=20000
                    )
                )

                logger.info(f"Collection {self.COLLECTION_NAME} created successfully")
            else:
                logger.info(f"Collection {self.COLLECTION_NAME} already exists")

        except Exception as e:
            logger.error(f"Failed to ensure collection: {e}")
            raise

    def ingest_transcript(
        self,
        video_id: str,
        text: str,
        metadata: Optional[Dict[str, Any]] = None,
        segments: Optional[List[Dict[str, Any]]] = None
    ) -> IngestionResult:
        """
        Ingest a transcript into Qdrant.

        Chunks the transcript, generates embeddings, and stores in Qdrant.

        NOTE(review): point IDs are fresh uuid4 values on every call, so
        re-ingesting the same video_id adds duplicate points rather than
        replacing the old ones; call delete_video() first to re-ingest.

        Args:
            video_id: YouTube video ID
            text: Full transcript text
            metadata: Video metadata (title, channel, etc.)
            segments: Optional timed segments for accurate timestamps

        Returns:
            IngestionResult with status and counts
        """
        start_time = time.time()
        metadata = metadata or {}

        try:
            # Chunk transcript
            chunks = self.chunker.chunk(text, segments)

            if not chunks:
                return IngestionResult(
                    success=False,
                    video_id=video_id,
                    error="No chunks generated from transcript"
                )

            logger.info(f"[{video_id}] Generated {len(chunks)} chunks")

            # Generate embeddings for all chunks
            chunk_texts = [c[0] for c in chunks]
            embeddings = self.embedder.embed_batch(chunk_texts)

            # Prepare points for Qdrant
            from qdrant_client.http.models import PointStruct

            points = []
            for i, ((chunk_text, start_time_sec), embedding) in enumerate(zip(chunks, embeddings)):
                point_id = str(uuid.uuid4())

                payload = {
                    "video_id": video_id,
                    "title": metadata.get("title", ""),
                    "channel": metadata.get("channel", ""),
                    "chunk_text": chunk_text,
                    "chunk_index": i,
                    "timestamp": start_time_sec,
                    "total_chunks": len(chunks),
                    "language": metadata.get("language", "en"),
                    "content_type": "transcript_chunk",
                    # NOTE(review): datetime.utcnow() is naive and deprecated
                    # in Python 3.12; consider datetime.now(timezone.utc).
                    "ingested_at": datetime.utcnow().isoformat() + "Z"
                }

                # Add any extra metadata
                for key in ["duration", "published_at", "description", "tags"]:
                    if key in metadata:
                        payload[key] = metadata[key]

                points.append(PointStruct(
                    id=point_id,
                    vector=embedding,
                    payload=payload
                ))

            # Upsert to Qdrant (wait=True blocks until persisted)
            self.client.upsert(
                collection_name=self.COLLECTION_NAME,
                points=points,
                wait=True
            )

            elapsed = int((time.time() - start_time) * 1000)

            logger.info(f"[{video_id}] Ingested {len(points)} chunks in {elapsed}ms")

            return IngestionResult(
                success=True,
                video_id=video_id,
                chunks_stored=len(points),
                ingestion_time_ms=elapsed
            )

        except Exception as e:
            logger.error(f"[{video_id}] Ingestion failed: {e}")
            return IngestionResult(
                success=False,
                video_id=video_id,
                error=str(e),
                ingestion_time_ms=int((time.time() - start_time) * 1000)
            )

    def ingest_entities(
        self,
        video_id: str,
        entities: List[Dict[str, Any]]
    ) -> IngestionResult:
        """
        Ingest extracted entities into Qdrant.

        Stores entity embeddings for knowledge graph search. As with
        ingest_transcript, repeated calls add duplicate points (uuid4 IDs).

        Args:
            video_id: Source video ID
            entities: List of entities with name, type, description

        Returns:
            IngestionResult with status and counts
        """
        start_time = time.time()

        if not entities:
            return IngestionResult(
                success=True,
                video_id=video_id,
                entities_stored=0
            )

        try:
            from qdrant_client.http.models import PointStruct

            # Generate embeddings for entity descriptions
            entity_texts = []
            for entity in entities:
                # Combine name and description for richer embedding
                text = f"{entity.get('name', '')}: {entity.get('description', '')}"
                if not text.strip() or text == ": ":
                    text = entity.get('name', 'Unknown entity')
                entity_texts.append(text)

            embeddings = self.embedder.embed_batch(entity_texts)

            # Prepare points
            points = []
            for entity, embedding in zip(entities, embeddings):
                point_id = str(uuid.uuid4())

                payload = {
                    "video_id": video_id,
                    "entity_name": entity.get("name", ""),
                    "entity_type": entity.get("type", "concept"),
                    "description": entity.get("description", ""),
                    "content_type": "entity",
                    "confidence": entity.get("confidence", 1.0),
                    "properties": entity.get("properties", {}),
                    "ingested_at": datetime.utcnow().isoformat() + "Z"
                }

                points.append(PointStruct(
                    id=point_id,
                    vector=embedding,
                    payload=payload
                ))

            # Upsert to Qdrant
            self.client.upsert(
                collection_name=self.COLLECTION_NAME,
                points=points,
                wait=True
            )

            elapsed = int((time.time() - start_time) * 1000)

            logger.info(f"[{video_id}] Ingested {len(points)} entities in {elapsed}ms")

            return IngestionResult(
                success=True,
                video_id=video_id,
                entities_stored=len(points),
                ingestion_time_ms=elapsed
            )

        except Exception as e:
            logger.error(f"[{video_id}] Entity ingestion failed: {e}")
            return IngestionResult(
                success=False,
                video_id=video_id,
                error=str(e),
                ingestion_time_ms=int((time.time() - start_time) * 1000)
            )

    def search(
        self,
        query: str,
        limit: int = 10,
        content_type: Optional[str] = None,
        video_id: Optional[str] = None,
        min_score: float = 0.0
    ) -> List[SearchResult]:
        """
        Semantic search across stored content.

        Returns an empty list (and logs) on any failure rather than raising.

        Args:
            query: Search query text
            limit: Maximum results to return
            content_type: Filter by type ("transcript_chunk" or "entity")
            video_id: Filter by specific video
            min_score: Minimum similarity score threshold

        Returns:
            List of SearchResult objects sorted by relevance
        """
        try:
            from qdrant_client.http.models import Filter, FieldCondition, MatchValue

            # Generate query embedding
            query_embedding = self.embedder.embed(query)

            # Build filter
            conditions = []

            if content_type:
                conditions.append(
                    FieldCondition(
                        key="content_type",
                        match=MatchValue(value=content_type)
                    )
                )

            if video_id:
                conditions.append(
                    FieldCondition(
                        key="video_id",
                        match=MatchValue(value=video_id)
                    )
                )

            query_filter = Filter(must=conditions) if conditions else None

            # Search using query_points (newer qdrant-client API)
            results = self.client.query_points(
                collection_name=self.COLLECTION_NAME,
                query=query_embedding,
                query_filter=query_filter,
                limit=limit,
                score_threshold=min_score,
                with_payload=True
            )

            # Handle different response formats (QueryResponse vs bare list)
            points = results.points if hasattr(results, 'points') else results

            # Convert to SearchResult objects
            search_results = []
            for r in points:
                payload = r.payload or {}

                # Determine text based on content type
                if payload.get("content_type") == "entity":
                    text = f"{payload.get('entity_name', '')}: {payload.get('description', '')}"
                else:
                    text = payload.get("chunk_text", "")

                search_results.append(SearchResult(
                    id=str(r.id),
                    video_id=payload.get("video_id", ""),
                    text=text,
                    score=r.score,
                    title=payload.get("title", ""),
                    channel=payload.get("channel", ""),
                    chunk_index=payload.get("chunk_index", 0),
                    timestamp=payload.get("timestamp", 0.0),
                    entity_type=payload.get("entity_type"),
                    payload=payload
                ))

            logger.info(f"Search '{query[:50]}...' returned {len(search_results)} results")

            return search_results

        except Exception as e:
            logger.error(f"Search failed: {e}")
            return []

    def get_similar_videos(
        self,
        video_id: str,
        limit: int = 5
    ) -> List[Dict[str, Any]]:
        """
        Find videos similar to a given video.

        Uses the video's transcript embeddings to find similar content
        in other videos.

        NOTE(review): only the first 100 chunks of the source video are
        scrolled (single page, no pagination), so very long videos are
        represented by a partial average vector.

        Args:
            video_id: Source video ID
            limit: Number of similar videos to return

        Returns:
            List of similar videos with similarity scores
        """
        try:
            from qdrant_client.http.models import Filter, FieldCondition, MatchValue

            # First, get all chunks from the source video
            source_filter = Filter(
                must=[
                    FieldCondition(
                        key="video_id",
                        match=MatchValue(value=video_id)
                    ),
                    FieldCondition(
                        key="content_type",
                        match=MatchValue(value="transcript_chunk")
                    )
                ]
            )

            source_points, _ = self.client.scroll(
                collection_name=self.COLLECTION_NAME,
                scroll_filter=source_filter,
                limit=100,
                with_vectors=True,
                with_payload=True
            )

            if not source_points:
                logger.warning(f"No content found for video {video_id}")
                return []

            # Compute average embedding for the video (centroid of chunks)
            vectors = [p.vector for p in source_points if p.vector]
            if not vectors:
                return []

            avg_vector = [
                sum(v[i] for v in vectors) / len(vectors)
                for i in range(len(vectors[0]))
            ]

            # Search for similar content, excluding source video
            exclude_filter = Filter(
                must_not=[
                    FieldCondition(
                        key="video_id",
                        match=MatchValue(value=video_id)
                    )
                ],
                must=[
                    FieldCondition(
                        key="content_type",
                        match=MatchValue(value="transcript_chunk")
                    )
                ]
            )

            # Use query_points for newer qdrant-client
            results = self.client.query_points(
                collection_name=self.COLLECTION_NAME,
                query=avg_vector,
                query_filter=exclude_filter,
                limit=limit * 5,  # Get more to aggregate by video
                with_payload=True
            )

            # Handle different response formats
            points = results.points if hasattr(results, 'points') else results

            # Aggregate by video_id
            video_scores = {}
            for r in points:
                payload = r.payload or {}
                vid = payload.get("video_id", "")

                if vid not in video_scores:
                    video_scores[vid] = {
                        "video_id": vid,
                        "title": payload.get("title", ""),
                        "channel": payload.get("channel", ""),
                        "scores": [],
                        "match_count": 0
                    }

                video_scores[vid]["scores"].append(r.score)
                video_scores[vid]["match_count"] += 1

            # Calculate average score per video
            similar_videos = []
            for vid, data in video_scores.items():
                avg_score = sum(data["scores"]) / len(data["scores"])
                similar_videos.append({
                    "video_id": data["video_id"],
                    "title": data["title"],
                    "channel": data["channel"],
                    "similarity_score": round(avg_score, 4),
                    "matching_chunks": data["match_count"]
                })

            # Sort by score and return top N
            similar_videos.sort(key=lambda x: x["similarity_score"], reverse=True)

            return similar_videos[:limit]

        except Exception as e:
            logger.error(f"Similar videos search failed: {e}")
            return []

    def delete_video(self, video_id: str) -> bool:
        """Delete all content (chunks and entities) for a video.

        Returns True on success, False on any error (logged, not raised).
        """
        try:
            from qdrant_client.http.models import Filter, FieldCondition, MatchValue

            self.client.delete(
                collection_name=self.COLLECTION_NAME,
                points_selector=Filter(
                    must=[
                        FieldCondition(
                            key="video_id",
                            match=MatchValue(value=video_id)
                        )
                    ]
                )
            )

            logger.info(f"Deleted all content for video {video_id}")
            return True

        except Exception as e:
            logger.error(f"Delete failed: {e}")
            return False

    def get_stats(self) -> Dict[str, Any]:
        """Get collection statistics.

        Returns a dict with counts and status, or {"error": ...} on failure.
        """
        try:
            info = self.client.get_collection(self.COLLECTION_NAME)

            # Handle different qdrant-client versions: newer versions dropped
            # vectors_count in favor of points_count.
            vectors_count = getattr(info, 'vectors_count', None)
            if vectors_count is None:
                vectors_count = getattr(info, 'points_count', 0)

            points_count = getattr(info, 'points_count', vectors_count)

            # status may be an enum or a plain value depending on version
            status = info.status
            if hasattr(status, 'value'):
                status = status.value
            else:
                status = str(status)

            return {
                "collection": self.COLLECTION_NAME,
                "vectors_count": vectors_count or 0,
                "points_count": points_count or 0,
                "status": status,
                "vector_size": self.VECTOR_SIZE
            }

        except Exception as e:
            return {"error": str(e)}

    def health_check(self) -> bool:
        """Check Qdrant connection health (True if the API responds)."""
        try:
            self.client.get_collections()
            return True
        except Exception:
            return False


# Singleton instance
_ingester = None


def get_ingester() -> QdrantVideoIngester:
    """Return the process-wide QdrantVideoIngester, constructing it on first use."""
    global _ingester
    if _ingester is not None:
        return _ingester
    _ingester = QdrantVideoIngester()
    return _ingester


async def main():
    """Command-line entry point for exercising the ingester.

    Subcommands:
        health       -- check Qdrant connectivity
        stats        -- print collection statistics as JSON
        search       -- semantic search over ingested content
        similar      -- find videos similar to a given video ID
        test-ingest  -- ingest a small synthetic transcript plus entities

    With no subcommand, prints the argparse help text.

    NOTE(review): declared ``async`` although no ``await`` occurs; kept so the
    ``asyncio.run(main())`` entry point keeps working, and for forward
    compatibility if the ingester grows async APIs.
    """
    import argparse

    parser = argparse.ArgumentParser(description="YouTube Qdrant Ingestion Tool")
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # Health check (no extra arguments, so the sub-parser is not bound)
    subparsers.add_parser("health", help="Check Qdrant health")

    # Stats (no extra arguments)
    subparsers.add_parser("stats", help="Get collection stats")

    # Search
    search_parser = subparsers.add_parser("search", help="Search content")
    search_parser.add_argument("query", help="Search query")
    search_parser.add_argument("--limit", type=int, default=5, help="Result limit")

    # Similar videos
    similar_parser = subparsers.add_parser("similar", help="Find similar videos")
    similar_parser.add_argument("video_id", help="Source video ID")
    similar_parser.add_argument("--limit", type=int, default=5, help="Result limit")

    # Ingest test
    ingest_parser = subparsers.add_parser("test-ingest", help="Test ingestion")
    ingest_parser.add_argument("--video-id", default="test123", help="Test video ID")

    args = parser.parse_args()

    if args.command == "health":
        ingester = get_ingester()
        healthy = ingester.health_check()
        print(f"Qdrant Health: {'HEALTHY' if healthy else 'UNHEALTHY'}")

    elif args.command == "stats":
        ingester = get_ingester()
        stats = ingester.get_stats()
        print(json.dumps(stats, indent=2))

    elif args.command == "search":
        ingester = get_ingester()
        results = ingester.search(args.query, limit=args.limit)
        print(f"\nSearch: '{args.query}'")
        print("=" * 60)
        for r in results:
            print(f"\n[{r.score:.3f}] {r.title or r.video_id}")
            print(f"  Video: {r.video_id} @ {r.timestamp:.1f}s")
            print(f"  {r.text[:200]}...")

    elif args.command == "similar":
        ingester = get_ingester()
        similar = ingester.get_similar_videos(args.video_id, limit=args.limit)
        print(f"\nVideos similar to: {args.video_id}")
        print("=" * 60)
        for v in similar:
            print(f"\n[{v['similarity_score']:.3f}] {v['title'] or v['video_id']}")
            print(f"  Channel: {v['channel']}")
            print(f"  Matching chunks: {v['matching_chunks']}")

    elif args.command == "test-ingest":
        ingester = get_ingester()

        # Synthetic transcript used as test data
        test_text = """
        Welcome to this comprehensive guide on building voice AI agents.
        In this video, we'll explore the fundamentals of conversational AI
        and how to implement voice interfaces using modern tools like Vapi
        and Telnyx. We'll cover topics including natural language processing,
        speech recognition, and voice synthesis.

        First, let's understand the architecture. A typical voice AI system
        consists of three main components: the speech-to-text engine,
        the language model for understanding and generating responses,
        and the text-to-speech engine for producing natural voice output.

        The key to building effective voice agents is understanding user intent.
        This requires training your models on domain-specific data and
        implementing robust error handling for edge cases.

        In the next section, we'll look at practical implementation patterns
        using Python and modern AI APIs. Stay tuned for code examples
        and best practices for production deployment.
        """

        result = ingester.ingest_transcript(
            video_id=args.video_id,
            text=test_text,
            metadata={
                "title": "Building Voice AI Agents - Complete Guide",
                "channel": "Genesis AI",
                "language": "en"
            }
        )

        print(f"\nIngestion Result:")
        print(f"  Success: {result.success}")
        print(f"  Chunks stored: {result.chunks_stored}")
        print(f"  Time: {result.ingestion_time_ms}ms")

        if result.error:
            print(f"  Error: {result.error}")

        # Exercise entity-level embeddings as well
        entities = [
            {"name": "Vapi", "type": "tool", "description": "Voice AI platform for building conversational agents"},
            {"name": "Telnyx", "type": "service", "description": "Telephony API provider for voice and SMS"},
            {"name": "NLP", "type": "concept", "description": "Natural Language Processing for understanding text"}
        ]

        entity_result = ingester.ingest_entities(args.video_id, entities)
        print(f"\nEntity Ingestion:")
        print(f"  Success: {entity_result.success}")
        print(f"  Entities stored: {entity_result.entities_stored}")

    else:
        parser.print_help()


if __name__ == "__main__":
    # Imported here since the event loop is only needed when run as a script.
    from asyncio import run

    run(main())
