"""
AIVA 3-Tier Memory Architecture

Unified memory interface coordinating working, episodic, and semantic memory.

Tiers:
1. Working Memory (Redis) - 8-32 items, 1hr TTL, LRU eviction
2. Episodic Memory (PostgreSQL) - 90-day hot retention, then archived
3. Semantic Memory (Qdrant) - Indefinite vector storage

VERIFICATION_STAMP
Story: AIVA-003
Verified By: Claude Code Agent
Verified At: 2026-01-26
Tests: See test_memory_architecture.py
Coverage: Full black-box and white-box testing
"""

from typing import Any, Dict, List, Optional
from datetime import datetime

from .working_memory import WorkingMemory
from .episodic_memory import EpisodicMemory
from .semantic_memory import SemanticMemory
from .memory_consolidator import MemoryConsolidator, SurpriseScorer

# Titan Memory Integration (Story AIVA-004)
from .titan_connector import (
    TitanConnector,
    TitanMemoryEntry,
    SyncStatus,
    get_titan_connector
)
from .titan_sync import (
    TitanSyncManager,
    RetryQueueEntry,
    ConflictResolution,
    get_sync_manager
)

# Memory Gate & Decision Context (Story AIVA-MEMGATE)
from .memory_gate import (
    MemoryGate,
    MemoryTier,
    BackendStatus,
    MemoryResult,
    GateStatus,
    get_memory_gate
)
from .decision_context import (
    DecisionContextBuilder,
    DecisionContext,
    ContextItem,
    build_decision_context,
    get_context_for_prompt
)


class MemoryManager:
    """
    Unified memory manager coordinating all three tiers.

    Provides a single interface for memory operations with automatic
    tier selection and cross-tier queries.

    Tiers (per module docstring):
        - working:  Redis-backed short-term store (fast, small)
        - episodic: PostgreSQL-backed event history
        - semantic: Qdrant-backed vector store

    All read paths degrade gracefully: a failing backend contributes
    empty results instead of raising to the caller.
    """

    def __init__(self):
        """Initialize memory manager with all three tiers and the consolidator."""
        self.working = WorkingMemory()
        self.episodic = EpisodicMemory()
        self.semantic = SemanticMemory()
        # Consolidator promotes/demotes items between tiers (see MemoryConsolidator).
        self.consolidator = MemoryConsolidator(
            self.working,
            self.episodic,
            self.semantic
        )

    def store(
        self,
        content: Any,
        event_type: str = "general",
        session_id: Optional[str] = None,
        key: Optional[str] = None,
        force_consolidate: bool = False
    ) -> Dict[str, Any]:
        """
        Store information in appropriate memory tier(s).

        Automatically routes to tiers based on surprise score (delegated
        to the consolidator).

        Args:
            content: Data to store. Non-dict values are wrapped as
                ``{'data': content}`` before consolidation.
            event_type: Type of event/content.
            session_id: Optional session identifier.
            key: Optional key for working memory.
                NOTE(review): currently unused by this method — kept for
                interface compatibility; confirm against callers.
            force_consolidate: Force storage in episodic tier.

        Returns:
            Dict with storage results and tier locations.
        """
        content_dict = content if isinstance(content, dict) else {'data': content}

        return self.consolidator.consolidate_event(
            event_type=event_type,
            content=content_dict,
            session_id=session_id,
            force_consolidate=force_consolidate
        )

    def query(
        self,
        query: str,
        query_type: str = "auto",
        limit: int = 10,
        embedding: Optional[List[float]] = None
    ) -> Dict[str, List[Dict]]:
        """
        Query across memory tiers with automatic tier selection.

        Search order: working -> semantic -> episodic.
        Results are merged with deduplication (working wins ties).

        Args:
            query: Search query string.
            query_type: 'auto', 'recent', 'semantic', or 'historical'.
            limit: Maximum results per tier (and for the merged list).
            embedding: Optional query embedding for semantic search.
                Semantic search only runs when an embedding is provided.

        Returns:
            Dict with per-tier result lists under 'working', 'semantic',
            'episodic', plus a deduplicated 'merged' list.
        """
        results: Dict[str, List[Dict]] = {
            'working': [],
            'semantic': [],
            'episodic': [],
            'merged': []
        }

        # Working memory first (fastest tier). A backend failure is
        # treated as an empty result: graceful degradation.
        try:
            results['working'] = self.working.search(query, limit=limit)
        except Exception:
            pass

        # Semantic search requires an embedding; without one the vector
        # store cannot be queried regardless of query_type. (The previous
        # query_type check here was dead code: the inner embedding guard
        # always dominated.)
        if embedding:
            try:
                results['semantic'] = self.semantic.retrieve_similar(
                    query_embedding=embedding,
                    limit=limit
                )
            except Exception:
                pass

        # Episodic (historical) search for broad and historical queries.
        if query_type in ('auto', 'historical'):
            try:
                results['episodic'] = self.episodic.search_by_content(
                    query=query,
                    limit=limit
                )
            except Exception:
                pass

        results['merged'] = self._merge_results(
            results['working'],
            results['semantic'],
            results['episodic'],
            limit=limit
        )

        return results

    def recall_recent(self, limit: int = 20) -> List[Dict]:
        """
        Recall recent memories from the working tier.

        Args:
            limit: Maximum items to return.

        Returns:
            List of recent memory items.
        """
        return self.working.get_context(limit=limit)

    def recall_episode(self, episode_id: str) -> Optional[Dict]:
        """
        Recall a specific episode by ID.

        Args:
            episode_id: Episode UUID.

        Returns:
            Episode dict, or None if not found or the backend fails.
        """
        try:
            return self.episodic.recall(episode_id)
        except Exception:
            return None

    def recall_similar(
        self,
        embedding: List[float],
        limit: int = 10,
        score_threshold: float = 0.7
    ) -> List[Dict]:
        """
        Recall semantically similar memories.

        Args:
            embedding: Query embedding vector.
            limit: Maximum results.
            score_threshold: Minimum similarity score.

        Returns:
            List of similar memories; empty on backend failure.
        """
        try:
            return self.semantic.retrieve_similar(
                query_embedding=embedding,
                limit=limit,
                score_threshold=score_threshold
            )
        except Exception:
            return []

    def consolidate(self) -> Dict[str, int]:
        """
        Perform scheduled memory consolidation.

        Returns:
            Dict with consolidation statistics.
        """
        return self.consolidator.daily_consolidation()

    def get_status(self) -> Dict:
        """
        Get status of all memory tiers.

        Returns:
            Dict with statistics from each tier, or an 'error' key with
            empty per-tier dicts if the consolidator fails.
        """
        try:
            return self.consolidator.get_consolidation_stats()
        except Exception as e:
            return {
                'error': str(e),
                'working_memory': {},
                'episodic_memory': {},
                'semantic_memory': {}
            }

    def _merge_results(
        self,
        working_results: List[Dict],
        semantic_results: List[Dict],
        episodic_results: List[Dict],
        limit: int = 10
    ) -> List[Dict]:
        """
        Merge results from multiple tiers with deduplication.

        Priority order: working > semantic > episodic. Each merged item
        is tagged with a 'source' key naming its tier (an existing
        'source' key in the item takes precedence via dict unpacking).

        Args:
            working_results: Items from the working tier.
            semantic_results: Items from the semantic tier.
            episodic_results: Items from the episodic tier.
            limit: Maximum number of merged items returned.

        Returns:
            Deduplicated, source-tagged list, truncated to ``limit``.
        """
        import json

        def content_key(item: Dict) -> str:
            # Explicit presence check: a falsy 'value' (0, "", [], None)
            # must not silently fall back to 'content' -- the previous
            # `get('value') or get('content', {})` caused distinct items
            # to collide and identical ones to duplicate.
            if 'value' in item:
                content = item['value']
            else:
                content = item.get('content', {})
            # default=str prevents non-JSON-serializable payloads from
            # raising TypeError and crashing every query() merge; such
            # payloads dedupe on their string form instead.
            return json.dumps(content, sort_keys=True, default=str)

        merged: List[Dict] = []
        seen_content: set = set()

        for source, items in (
            ('working', working_results),
            ('semantic', semantic_results),
            ('episodic', episodic_results),
        ):
            for item in items:
                key = content_key(item)
                if key not in seen_content:
                    seen_content.add(key)
                    merged.append({'source': source, **item})

        return merged[:limit]

    def close(self):
        """
        Close all tier connections.

        Currently only the episodic tier exposes/needs an explicit close.
        NOTE(review): working/semantic backends may also hold connections —
        confirm whether they require explicit shutdown.
        """
        try:
            self.episodic.close()
        except Exception:
            pass


# Public API of the memory package: names re-exported for
# `from <package> import *` and for explicit external imports.
__all__ = [
    # 3-Tier Memory Architecture (AIVA-003)
    'MemoryManager',
    'WorkingMemory',
    'EpisodicMemory',
    'SemanticMemory',
    'MemoryConsolidator',
    'SurpriseScorer',

    # Titan Memory Integration (AIVA-004)
    'TitanConnector',
    'TitanMemoryEntry',
    'SyncStatus',
    'get_titan_connector',
    'TitanSyncManager',
    'RetryQueueEntry',
    'ConflictResolution',
    'get_sync_manager',

    # Memory Gate & Decision Context (AIVA-MEMGATE)
    'MemoryGate',
    'MemoryTier',
    'BackendStatus',
    'MemoryResult',
    'GateStatus',
    'get_memory_gate',
    'DecisionContextBuilder',
    'DecisionContext',
    'ContextItem',
    'build_decision_context',
    'get_context_for_prompt',
]
