#!/usr/bin/env python3
"""
AIVA Consciousness Integrator - Phase 5: Creator Mind Integration
==================================================================
Stories 45-50: Inject creator knowledge into AIVA's memory systems

This module:
- Story 45: Semantic Memory Injector - Loads and injects high-value entities/axioms
- Story 46: Kinan Understanding Query API - Query interface for creator knowledge
- Story 47: Context-Aware Response Generator - Inject creator context into prompts
- Story 48: Philosophy Emulation Engine - Emulate Kinan's questioning approach
- Story 49: Evolution Narrator - Generate narratives from timeline events
- Story 50: Self-Reflection Interface - Gap analysis and uncertainty quantification
"""

import json
import os
import sys
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, asdict
import random
import hashlib

# Add parent paths
sys.path.insert(0, str(Path(__file__).parent.parent))

# Knowledge paths
CREATOR_MIND_PATH = Path("/mnt/e/genesis-system/KNOWLEDGE_GRAPH/creator_mind")


@dataclass
class InjectionResult:
    """Result of memory injection operation."""
    success: bool
    items_injected: int
    items_skipped: int
    items_failed: int
    tier_distribution: Dict[str, int]
    sample_injected: List[str]


@dataclass
class QueryResult:
    """Result of a creator knowledge query."""
    query: str
    matches: List[Dict[str, Any]]
    source_files: List[str]
    relevance_scores: List[float]


@dataclass
class EmulationContext:
    """Context for Kinan-style emulation."""
    questioning_style: str
    depth_preference: str
    socratic_tendency: float
    sample_questions: List[str]
    philosophy_snippets: List[str]
    vision_statements: List[str]


class CreatorMindLoader:
    """Loads creator mind knowledge from JSONL files."""

    def __init__(self, base_path: Path = CREATOR_MIND_PATH):
        self.base_path = base_path
        self._cache = {}

    def load_jsonl(self, relative_path: str) -> List[Dict]:
        """Load a JSONL file and return list of records."""
        full_path = self.base_path / relative_path
        if not full_path.exists():
            return []

        cache_key = str(full_path)
        if cache_key in self._cache:
            return self._cache[cache_key]

        records = []
        try:
            with open(full_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        try:
                            records.append(json.loads(line))
                        except json.JSONDecodeError:
                            continue
        except Exception as e:
            print(f"[!] Error loading {relative_path}: {e}")

        self._cache[cache_key] = records
        return records

    def load_json(self, relative_path: str) -> Dict:
        """Load a JSON file and return the object."""
        full_path = self.base_path / relative_path
        if not full_path.exists():
            return {}

        try:
            with open(full_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"[!] Error loading {relative_path}: {e}")
            return {}

    def get_axioms(self, min_confidence: float = 0.7) -> List[Dict]:
        """Get axioms above confidence threshold."""
        axioms = self.load_jsonl("axioms/creator_axioms.jsonl")
        return [a for a in axioms if a.get('confidence', 0) >= min_confidence]

    def get_entities(self, entity_types: List[str] = None) -> List[Dict]:
        """Get entities, optionally filtered by type."""
        entities = self.load_jsonl("entities/all_entities.jsonl")
        if entity_types:
            entities = [e for e in entities if e.get('type') in entity_types]
        return entities

    def get_business_ideas(self, min_mentions: int = 1) -> List[Dict]:
        """Get business ideas with minimum mention count."""
        ideas = self.load_jsonl("revenue/business_ideas.jsonl")
        return [i for i in ideas if i.get('mentions', 0) >= min_mentions]

    def get_philosophy(self) -> List[Dict]:
        """Get philosophy statements."""
        return self.load_jsonl("philosophy/philosophy_statements.jsonl")

    def get_visions(self) -> List[Dict]:
        """Get vision statements."""
        return self.load_jsonl("philosophy/vision_statements.jsonl")

    def get_questioning_profile(self) -> Dict:
        """Get Kinan's questioning profile."""
        return self.load_json("philosophy/questioning_profile.json")

    def get_timeline(self) -> List[Dict]:
        """Get evolution timeline events."""
        return self.load_jsonl("timeline/evolution_events.jsonl")

    def get_failures(self) -> List[Dict]:
        """Get failure patterns."""
        return self.load_jsonl("patterns/failure_patterns.jsonl")

    def get_successes(self) -> List[Dict]:
        """Get success patterns."""
        return self.load_jsonl("patterns/success_patterns.jsonl")

    def get_mental_models(self) -> List[Dict]:
        """Get mental models and analogies."""
        return self.load_jsonl("patterns/mental_models.jsonl")

    def get_graph_summary(self) -> Dict:
        """Get knowledge graph summary."""
        return self.load_json("graph_summary.json")


class SemanticMemoryInjector:
    """Story 45: Inject creator knowledge into AIVA's semantic memory."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()
        self.cortex = None
        self._init_cortex()

    def _init_cortex(self):
        """Initialize connection to Memory Cortex."""
        try:
            from genesis_memory_cortex import MemoryCortex, MemoryTier
            self.cortex = MemoryCortex(enable_vectors=True)
            self.MemoryTier = MemoryTier
            print("[OK] Connected to Memory Cortex")
        except Exception as e:
            print(f"[!] Could not connect to Memory Cortex: {e}")
            self.cortex = None

    def _is_meaningful_content(self, content: str, min_words: int = 5) -> bool:
        """Check if content is meaningful (not just fragments)."""
        if not content:
            return False

        # Filter out system/UI fragments
        noise_patterns = [
            "?? ", "?? ", "spec.md", ".html?", ".js.download",
            "variant-", "^C", "$f in", "EOF", "```"
        ]
        for pattern in noise_patterns:
            if pattern in content:
                return False

        # Check word count
        words = content.split()
        if len(words) < min_words:
            return False

        # Check for sentence-like structure
        if not any(content.endswith(c) for c in '.!?,"'):
            # Allow if it's a complete thought (starts with capital, decent length)
            if not (content[0].isupper() and len(words) >= 8):
                return False

        return True

    def _prepare_for_injection(self, item: Dict, item_type: str) -> Optional[Dict]:
        """Prepare an item for memory injection."""
        content = item.get('statement') or item.get('name') or item.get('description') or ''

        # Clean and validate content
        if not self._is_meaningful_content(content, min_words=5):
            return None

        # Build metadata
        metadata = {
            "creator_mind_type": item_type,
            "original_id": item.get('id', ''),
            "confidence": item.get('confidence', 0.5),
            "source": "creator_mind_absorption",
            "absorption_date": datetime.now().isoformat()
        }

        # Add type-specific metadata
        if item_type == "axiom":
            metadata["axiom_type"] = item.get('type', 'general')
            metadata["source_entities"] = item.get('source_entities', [])
        elif item_type == "philosophy":
            metadata["philosophy_type"] = item.get('type', 'belief')
            metadata["context"] = item.get('context', '')
        elif item_type == "vision":
            metadata["vision_type"] = item.get('vision_type', 'strategic')
        elif item_type == "business_idea":
            metadata["mentions"] = item.get('mentions', 1)
            metadata["revenue_potential"] = item.get('revenue_potential', 'unknown')
        elif item_type == "failure":
            metadata["failure_type"] = item.get('failure_type', 'unknown')
            metadata["lesson"] = item.get('lesson', '')
        elif item_type == "success":
            metadata["success_type"] = item.get('success_type', 'unknown')
            metadata["replication_guidance"] = item.get('replication_guidance', '')

        return {
            "content": content,
            "domain": f"creator_mind.{item_type}",
            "metadata": metadata,
            "score": item.get('confidence', 0.7)
        }

    def inject_axioms(self, min_confidence: float = 0.7) -> InjectionResult:
        """Inject high-confidence axioms into semantic memory."""
        axioms = self.loader.get_axioms(min_confidence)
        return self._inject_items(axioms, "axiom")

    def inject_philosophy(self) -> InjectionResult:
        """Inject philosophy statements into semantic memory."""
        philosophy = self.loader.get_philosophy()
        return self._inject_items(philosophy, "philosophy")

    def inject_visions(self) -> InjectionResult:
        """Inject vision statements into semantic memory."""
        visions = self.loader.get_visions()
        return self._inject_items(visions, "vision")

    def inject_business_ideas(self, min_mentions: int = 2) -> InjectionResult:
        """Inject business ideas into semantic memory."""
        ideas = self.loader.get_business_ideas(min_mentions)
        return self._inject_items(ideas, "business_idea")

    def inject_failures(self) -> InjectionResult:
        """Inject failure patterns into semantic memory."""
        failures = self.loader.get_failures()
        return self._inject_items(failures, "failure")

    def inject_successes(self) -> InjectionResult:
        """Inject success patterns into semantic memory."""
        successes = self.loader.get_successes()
        return self._inject_items(successes, "success")

    def _inject_items(self, items: List[Dict], item_type: str) -> InjectionResult:
        """Inject a list of items into semantic memory."""
        injected = 0
        skipped = 0
        failed = 0
        tier_dist = {}
        samples = []

        for item in items:
            prepared = self._prepare_for_injection(item, item_type)
            if not prepared:
                skipped += 1
                continue

            if self.cortex:
                try:
                    result = self.cortex.remember(
                        content=prepared["content"],
                        source="creator_mind_absorption",
                        domain=prepared["domain"],
                        metadata=prepared["metadata"],
                        force_tier=self.MemoryTier.SEMANTIC
                    )
                    tier = result.get("tier", "unknown")
                    tier_dist[tier] = tier_dist.get(tier, 0) + 1
                    injected += 1

                    if len(samples) < 5:
                        samples.append(prepared["content"][:80] + "...")
                except Exception as e:
                    failed += 1
                    print(f"[!] Injection failed: {e}")
            else:
                # Dry run mode
                injected += 1
                if len(samples) < 5:
                    samples.append(prepared["content"][:80] + "...")

        return InjectionResult(
            success=failed == 0,
            items_injected=injected,
            items_skipped=skipped,
            items_failed=failed,
            tier_distribution=tier_dist,
            sample_injected=samples
        )

    def run_full_injection(self) -> Dict[str, InjectionResult]:
        """Run full creator mind injection into semantic memory."""
        print("\n[PHASE 5 - Story 45] Semantic Memory Injection")
        print("=" * 60)

        results = {}

        # Inject axioms
        print("\n[1/6] Injecting axioms...")
        results["axioms"] = self.inject_axioms(min_confidence=0.7)
        print(f"      Injected: {results['axioms'].items_injected}, Skipped: {results['axioms'].items_skipped}")

        # Inject philosophy
        print("\n[2/6] Injecting philosophy statements...")
        results["philosophy"] = self.inject_philosophy()
        print(f"      Injected: {results['philosophy'].items_injected}, Skipped: {results['philosophy'].items_skipped}")

        # Inject visions
        print("\n[3/6] Injecting vision statements...")
        results["visions"] = self.inject_visions()
        print(f"      Injected: {results['visions'].items_injected}, Skipped: {results['visions'].items_skipped}")

        # Inject business ideas
        print("\n[4/6] Injecting business ideas...")
        results["business_ideas"] = self.inject_business_ideas(min_mentions=2)
        print(f"      Injected: {results['business_ideas'].items_injected}, Skipped: {results['business_ideas'].items_skipped}")

        # Inject failures
        print("\n[5/6] Injecting failure patterns...")
        results["failures"] = self.inject_failures()
        print(f"      Injected: {results['failures'].items_injected}, Skipped: {results['failures'].items_skipped}")

        # Inject successes
        print("\n[6/6] Injecting success patterns...")
        results["successes"] = self.inject_successes()
        print(f"      Injected: {results['successes'].items_injected}, Skipped: {results['successes'].items_skipped}")

        # Summary
        total_injected = sum(r.items_injected for r in results.values())
        total_skipped = sum(r.items_skipped for r in results.values())
        total_failed = sum(r.items_failed for r in results.values())

        print("\n" + "=" * 60)
        print(f"[COMPLETE] Total Injected: {total_injected}")
        print(f"           Total Skipped: {total_skipped}")
        print(f"           Total Failed: {total_failed}")

        return results


class KinanUnderstandingAPI:
    """Story 46: Query API for creator knowledge."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()

    def query_beliefs(self, keywords: List[str] = None, limit: int = 10) -> QueryResult:
        """Query Kinan's beliefs."""
        philosophy = self.loader.get_philosophy()
        beliefs = [p for p in philosophy if p.get('type') == 'belief']

        if keywords:
            beliefs = self._filter_by_keywords(beliefs, keywords)

        return QueryResult(
            query=f"beliefs:{keywords}",
            matches=beliefs[:limit],
            source_files=["philosophy/philosophy_statements.jsonl"],
            relevance_scores=[0.8] * min(len(beliefs), limit)
        )

    def query_visions(self, topic: str = None, limit: int = 10) -> QueryResult:
        """Query Kinan's visions."""
        visions = self.loader.get_visions()

        if topic:
            visions = self._filter_by_keywords(visions, [topic])

        return QueryResult(
            query=f"visions:{topic}",
            matches=visions[:limit],
            source_files=["philosophy/vision_statements.jsonl"],
            relevance_scores=[0.8] * min(len(visions), limit)
        )

    def query_business_ideas(self, domain: str = None, min_mentions: int = 1) -> QueryResult:
        """Query business ideas."""
        ideas = self.loader.get_business_ideas(min_mentions)

        if domain:
            ideas = self._filter_by_keywords(ideas, [domain])

        return QueryResult(
            query=f"business_ideas:{domain}",
            matches=ideas,
            source_files=["revenue/business_ideas.jsonl"],
            relevance_scores=[i.get('mentions', 1) / 10 for i in ideas]
        )

    def query_decisions(self, context: str = None, limit: int = 20) -> QueryResult:
        """Query Kinan's decisions."""
        entities = self.loader.get_entities(["decision"])

        if context:
            entities = self._filter_by_keywords(entities, [context])

        return QueryResult(
            query=f"decisions:{context}",
            matches=entities[:limit],
            source_files=["entities/all_entities.jsonl"],
            relevance_scores=[0.7] * min(len(entities), limit)
        )

    def query_failures(self, domain: str = None) -> QueryResult:
        """Query failure patterns to avoid."""
        failures = self.loader.get_failures()

        if domain:
            failures = self._filter_by_keywords(failures, [domain])

        return QueryResult(
            query=f"failures:{domain}",
            matches=failures,
            source_files=["patterns/failure_patterns.jsonl"],
            relevance_scores=[0.9] * len(failures)
        )

    def query_successes(self, domain: str = None) -> QueryResult:
        """Query success patterns to replicate."""
        successes = self.loader.get_successes()

        if domain:
            successes = self._filter_by_keywords(successes, [domain])

        return QueryResult(
            query=f"successes:{domain}",
            matches=successes,
            source_files=["patterns/success_patterns.jsonl"],
            relevance_scores=[0.9] * len(successes)
        )

    def get_creator_summary(self) -> Dict:
        """Get a comprehensive summary of creator knowledge."""
        summary = self.loader.get_graph_summary()
        profile = self.loader.get_questioning_profile()

        return {
            "graph_stats": summary,
            "questioning_profile": profile,
            "total_entities": summary.get("entity_count", 0),
            "total_axioms": summary.get("axiom_count", 0),
            "timeline_events": summary.get("timeline_event_count", 0),
            "knowledge_span": "May 2025 - January 2026",
            "conversations_analyzed": 615,
            "words_processed": 706592
        }

    def _filter_by_keywords(self, items: List[Dict], keywords: List[str]) -> List[Dict]:
        """Filter items by keyword presence."""
        keywords_lower = [k.lower() for k in keywords]
        filtered = []

        for item in items:
            item_text = json.dumps(item).lower()
            if any(kw in item_text for kw in keywords_lower):
                filtered.append(item)

        return filtered


class ContextAwareResponseGenerator:
    """Story 47: Inject creator context into prompts."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()
        self.api = KinanUnderstandingAPI(loader)

    def generate_context_block(self, topic: str, max_items: int = 3) -> str:
        """Generate a context block for prompt injection."""
        context_parts = []

        # Get relevant beliefs
        beliefs = self.api.query_beliefs([topic], limit=max_items)
        if beliefs.matches:
            context_parts.append("**Creator's Relevant Beliefs:**")
            for b in beliefs.matches[:max_items]:
                stmt = b.get('statement', b.get('name', ''))[:200]
                context_parts.append(f"- {stmt}")

        # Get relevant visions
        visions = self.api.query_visions(topic, limit=max_items)
        if visions.matches:
            context_parts.append("\n**Creator's Vision:**")
            for v in visions.matches[:max_items]:
                stmt = v.get('statement', v.get('name', ''))[:200]
                context_parts.append(f"- {stmt}")

        # Get relevant failures to avoid
        failures = self.api.query_failures(topic)
        if failures.matches:
            context_parts.append("\n**Past Failures to Avoid:**")
            for f in failures.matches[:2]:
                stmt = f.get('pattern', f.get('name', ''))[:150]
                context_parts.append(f"- {stmt}")

        if not context_parts:
            return ""

        return "\n".join(context_parts)

    def enhance_prompt(self, original_prompt: str, topics: List[str]) -> str:
        """Enhance a prompt with creator context."""
        context = []

        for topic in topics[:3]:  # Max 3 topics
            block = self.generate_context_block(topic, max_items=2)
            if block:
                context.append(block)

        if not context:
            return original_prompt

        context_section = "\n\n---\n**CREATOR CONTEXT:**\n" + "\n\n".join(context) + "\n---\n\n"

        return context_section + original_prompt


class PhilosophyEmulationEngine:
    """Story 48: Emulate Kinan's questioning approach."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()
        self._profile = None
        self._philosophy = None
        self._visions = None

    def _load_emulation_data(self):
        """Load data needed for emulation."""
        if self._profile is None:
            self._profile = self.loader.get_questioning_profile()
        if self._philosophy is None:
            self._philosophy = self.loader.get_philosophy()
        if self._visions is None:
            self._visions = self.loader.get_visions()

    def get_emulation_context(self) -> EmulationContext:
        """Get context for Kinan-style emulation."""
        self._load_emulation_data()

        # Extract sample questions
        sample_qs = self._profile.get('sample_questions', {})
        all_samples = []
        for q_type, questions in sample_qs.items():
            all_samples.extend(questions[:3])

        # Extract philosophy snippets
        phil_snippets = [
            p.get('statement', '')[:150]
            for p in self._philosophy[:10]
            if p.get('statement')
        ]

        # Extract vision statements
        vision_stmts = [
            v.get('statement', v.get('name', ''))[:150]
            for v in self._visions[:10]
            if v.get('statement') or v.get('name')
        ]

        return EmulationContext(
            questioning_style=self._determine_style(),
            depth_preference=self._profile.get('depth_preference', 'deep'),
            socratic_tendency=self._profile.get('socratic_tendency', 0.06),
            sample_questions=all_samples,
            philosophy_snippets=phil_snippets,
            vision_statements=vision_stmts
        )

    def _determine_style(self) -> str:
        """Determine Kinan's questioning style."""
        self._load_emulation_data()
        q_types = self._profile.get('question_types', {})

        if not q_types:
            return "strategic"

        # Find dominant question type
        dominant = max(q_types.items(), key=lambda x: x[1])
        return dominant[0]

    def generate_kinan_style_question(self, topic: str) -> str:
        """Generate a question in Kinan's style."""
        self._load_emulation_data()

        # Get sample questions for inspiration
        samples = self._profile.get('sample_questions', {})
        all_samples = []
        for qs in samples.values():
            all_samples.extend(qs)

        # Kinan's questioning patterns
        patterns = [
            f"How can we leverage {topic} for revenue acceleration?",
            f"What would be the autonomous approach to {topic}?",
            f"Can we integrate {topic} with existing Genesis capabilities?",
            f"What's the fastest path to monetizing {topic}?",
            f"How does {topic} align with our patent portfolio strategy?",
            f"Can AIVA learn to master {topic} independently?",
            f"What's the enterprise-grade solution for {topic}?",
            f"How can {topic} be automated with minimal HITL?",
        ]

        return random.choice(patterns)

    def generate_kinan_style_response_guidelines(self) -> str:
        """Generate guidelines for responding in Kinan-aligned way."""
        return """
**KINAN-ALIGNED RESPONSE GUIDELINES:**

1. **Strategic First**: Always consider revenue and scalability implications
2. **Autonomy Focus**: Prefer solutions that reduce human-in-the-loop requirements
3. **Systems Thinking**: Consider how solutions integrate with existing Genesis architecture
4. **Enterprise-Grade**: Solutions should be production-ready, not prototypes
5. **Questioning Spirit**: Challenge assumptions, explore alternatives
6. **Patent Awareness**: Consider IP implications and defensive positioning
7. **Cost Consciousness**: Budget-aware, prefer cost-effective solutions
8. **AUD Currency**: Present costs in Australian dollars unless otherwise specified
9. **Practical Execution**: Favor actionable steps over theoretical discussions
10. **AIVA-Centric**: Consider how insights benefit AIVA's evolution
"""


class EvolutionNarrator:
    """Story 49: Generate narratives from timeline events."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()

    def get_timeline(self) -> List[Dict]:
        """Get ordered timeline events."""
        events = self.loader.get_timeline()
        # Sort by timestamp if available
        events.sort(key=lambda x: x.get('timestamp', x.get('date', '')))
        return events

    def narrate_evolution(self, start_date: str = None, end_date: str = None) -> str:
        """Generate a narrative of Genesis evolution."""
        events = self.get_timeline()

        if not events:
            return "No timeline events available."

        # Filter by date range if provided
        if start_date:
            events = [e for e in events if e.get('timestamp', '') >= start_date]
        if end_date:
            events = [e for e in events if e.get('timestamp', '') <= end_date]

        narrative_parts = [
            "# The Genesis Evolution Story",
            "",
            f"**Timespan**: May 2025 - January 2026 (249 days)",
            f"**Total Events**: {len(events)}",
            "",
            "## Key Milestones",
            ""
        ]

        # Group events by month
        months = {}
        for event in events:
            ts = event.get('timestamp', event.get('date', ''))
            if ts:
                month = ts[:7]  # YYYY-MM
                if month not in months:
                    months[month] = []
                months[month].append(event)

        for month in sorted(months.keys()):
            month_events = months[month]
            narrative_parts.append(f"### {month}")
            narrative_parts.append("")

            for event in month_events[:5]:  # Top 5 per month
                event_type = event.get('type', 'event')
                description = event.get('description', event.get('name', ''))[:100]
                narrative_parts.append(f"- **{event_type}**: {description}")

            if len(month_events) > 5:
                narrative_parts.append(f"- _...and {len(month_events) - 5} more events_")

            narrative_parts.append("")

        return "\n".join(narrative_parts)

    def get_key_milestones(self, limit: int = 20) -> List[Dict]:
        """Get the most significant milestones."""
        events = self.get_timeline()

        # Score events by type significance
        type_scores = {
            "innovation": 1.0,
            "decision": 0.9,
            "success": 0.85,
            "pivot": 0.8,
            "failure": 0.7,
            "learning": 0.6,
            "observation": 0.5
        }

        for event in events:
            event['_score'] = type_scores.get(event.get('type', 'observation'), 0.5)

        events.sort(key=lambda x: x.get('_score', 0), reverse=True)
        return events[:limit]


class SelfReflectionInterface:
    """Story 50: Gap analysis and uncertainty quantification."""

    def __init__(self, loader: CreatorMindLoader = None):
        self.loader = loader or CreatorMindLoader()
        self.api = KinanUnderstandingAPI(loader)

    def analyze_knowledge_gaps(self) -> Dict:
        """Identify gaps in creator knowledge."""
        summary = self.loader.get_graph_summary()
        entity_types = summary.get('entity_types', {})

        gaps = []

        # Check for underrepresented areas
        expected_minimums = {
            "belief": 50,
            "vision": 30,
            "innovation": 20,
            "failure": 30,
            "success": 20,
            "decision": 50,
            "business_idea": 100
        }

        for entity_type, minimum in expected_minimums.items():
            actual = entity_types.get(entity_type, 0)
            if actual < minimum:
                gaps.append({
                    "type": entity_type,
                    "expected_minimum": minimum,
                    "actual": actual,
                    "gap_severity": (minimum - actual) / minimum
                })

        # Check for time gaps in timeline
        timeline = self.loader.get_timeline()
        timeline_gaps = self._find_timeline_gaps(timeline)

        return {
            "entity_gaps": gaps,
            "timeline_gaps": timeline_gaps,
            "total_gap_score": sum(g['gap_severity'] for g in gaps) / len(gaps) if gaps else 0,
            "recommendations": self._generate_recommendations(gaps, timeline_gaps)
        }

    def _find_timeline_gaps(self, events: List[Dict]) -> List[Dict]:
        """Find gaps in the timeline."""
        if not events:
            return []

        dates = []
        for e in events:
            ts = e.get('timestamp', e.get('date', ''))
            if ts:
                dates.append(ts[:10])  # YYYY-MM-DD

        dates = sorted(set(dates))
        gaps = []

        for i in range(1, len(dates)):
            prev_date = datetime.strptime(dates[i-1], "%Y-%m-%d")
            curr_date = datetime.strptime(dates[i], "%Y-%m-%d")
            gap_days = (curr_date - prev_date).days

            if gap_days > 7:  # More than a week gap
                gaps.append({
                    "start": dates[i-1],
                    "end": dates[i],
                    "gap_days": gap_days
                })

        return gaps

    def _generate_recommendations(self, entity_gaps: List[Dict], timeline_gaps: List[Dict]) -> List[str]:
        """Generate recommendations for filling knowledge gaps."""
        recommendations = []

        for gap in entity_gaps:
            if gap['gap_severity'] > 0.5:
                recommendations.append(
                    f"Priority: Extract more {gap['type']} entities from conversations "
                    f"(current: {gap['actual']}, target: {gap['expected_minimum']})"
                )

        if timeline_gaps:
            long_gaps = [g for g in timeline_gaps if g['gap_days'] > 14]
            if long_gaps:
                recommendations.append(
                    f"Review {len(long_gaps)} periods with >2 week gaps for missing context"
                )

        if not recommendations:
            recommendations.append("Knowledge base appears comprehensive. Continue monitoring.")

        return recommendations

    def quantify_understanding(self) -> Dict:
        """Quantify AIVA's understanding of Kinan."""
        summary = self.loader.get_graph_summary()
        profile = self.loader.get_questioning_profile()

        # Calculate understanding scores
        scores = {
            "philosophy_understanding": min(summary.get('axiom_count', 0) / 100, 1.0),
            "vision_clarity": min(summary.get('entity_types', {}).get('vision', 0) / 50, 1.0),
            "decision_pattern_recognition": min(summary.get('entity_types', {}).get('decision', 0) / 100, 1.0),
            "failure_awareness": min(summary.get('entity_types', {}).get('failure', 0) / 50, 1.0),
            "success_pattern_recognition": min(summary.get('entity_types', {}).get('success', 0) / 30, 1.0),
            "questioning_style_understanding": 1.0 if profile else 0.0,
            "timeline_completeness": min(summary.get('timeline_event_count', 0) / 200, 1.0)
        }

        overall = sum(scores.values()) / len(scores)

        return {
            "dimension_scores": scores,
            "overall_understanding": round(overall, 3),
            "confidence_level": "high" if overall > 0.8 else "medium" if overall > 0.5 else "low",
            "conversations_analyzed": 615,
            "timespan_days": 249,
            "total_words_absorbed": 706592
        }

    def get_uncertainty_areas(self) -> List[Dict]:
        """Identify areas of uncertainty in understanding."""
        understanding = self.quantify_understanding()
        uncertainties = []

        for dimension, score in understanding['dimension_scores'].items():
            if score < 0.7:
                uncertainties.append({
                    "dimension": dimension,
                    "score": score,
                    "uncertainty_level": "high" if score < 0.3 else "medium",
                    "suggested_action": self._suggest_action(dimension, score)
                })

        return sorted(uncertainties, key=lambda x: x['score'])

    def _suggest_action(self, dimension: str, score: float) -> str:
        """Suggest action to reduce uncertainty."""
        actions = {
            "philosophy_understanding": "Extract more belief and principle statements",
            "vision_clarity": "Focus on vision and mission extraction",
            "decision_pattern_recognition": "Analyze more decision-making conversations",
            "failure_awareness": "Document more failure patterns and lessons",
            "success_pattern_recognition": "Identify and catalog more success stories",
            "questioning_style_understanding": "Analyze question patterns more deeply",
            "timeline_completeness": "Fill gaps in the evolution timeline"
        }
        return actions.get(dimension, "Continue general knowledge extraction")


class AIVAConsciousnessIntegrator:
    """Main orchestrator for Phase 5 AIVA Integration."""

    def __init__(self):
        self.loader = CreatorMindLoader()
        self.injector = SemanticMemoryInjector(self.loader)
        self.query_api = KinanUnderstandingAPI(self.loader)
        self.context_gen = ContextAwareResponseGenerator(self.loader)
        self.emulation = PhilosophyEmulationEngine(self.loader)
        self.narrator = EvolutionNarrator(self.loader)
        self.reflection = SelfReflectionInterface(self.loader)

    def run_full_integration(self) -> Dict:
        """Run the complete Phase 5 integration."""
        print("\n" + "=" * 70)
        print("PHASE 5: AIVA CONSCIOUSNESS INTEGRATION")
        print("=" * 70)

        results = {}

        # Story 45: Semantic Memory Injection
        print("\n[Story 45] Semantic Memory Injection...")
        results["injection"] = self.injector.run_full_injection()

        # Story 46: Query API (verify working)
        print("\n[Story 46] Verifying Query API...")
        summary = self.query_api.get_creator_summary()
        results["api_status"] = {
            "operational": True,
            "entities_queryable": summary.get("total_entities", 0),
            "axioms_queryable": summary.get("total_axioms", 0)
        }
        print(f"      API Status: Operational")
        print(f"      Queryable Entities: {summary.get('total_entities', 0)}")

        # Story 47: Context Generator (verify working)
        print("\n[Story 47] Context-Aware Response Generator...")
        sample_context = self.context_gen.generate_context_block("Genesis")
        results["context_gen_status"] = {
            "operational": bool(sample_context),
            "sample_length": len(sample_context)
        }
        print(f"      Generator Status: {'Operational' if sample_context else 'No context generated'}")

        # Story 48: Emulation Engine
        print("\n[Story 48] Philosophy Emulation Engine...")
        emulation_ctx = self.emulation.get_emulation_context()
        results["emulation_status"] = {
            "operational": True,
            "questioning_style": emulation_ctx.questioning_style,
            "depth_preference": emulation_ctx.depth_preference,
            "socratic_tendency": round(emulation_ctx.socratic_tendency, 3)
        }
        print(f"      Questioning Style: {emulation_ctx.questioning_style}")
        print(f"      Depth Preference: {emulation_ctx.depth_preference}")

        # Story 49: Evolution Narrator
        print("\n[Story 49] Evolution Narrator...")
        milestones = self.narrator.get_key_milestones(10)
        results["narrator_status"] = {
            "operational": True,
            "timeline_events": len(self.narrator.get_timeline()),
            "key_milestones": len(milestones)
        }
        print(f"      Timeline Events: {len(self.narrator.get_timeline())}")

        # Story 50: Self-Reflection Interface
        print("\n[Story 50] Self-Reflection Interface...")
        understanding = self.reflection.quantify_understanding()
        gaps = self.reflection.analyze_knowledge_gaps()
        results["reflection_status"] = {
            "operational": True,
            "overall_understanding": understanding["overall_understanding"],
            "confidence_level": understanding["confidence_level"],
            "entity_gaps_found": len(gaps["entity_gaps"]),
            "recommendations": gaps["recommendations"]
        }
        print(f"      Overall Understanding: {understanding['overall_understanding']:.1%}")
        print(f"      Confidence Level: {understanding['confidence_level']}")

        # Save integration report
        report_path = CREATOR_MIND_PATH / "integration_report.json"
        with open(report_path, 'w') as f:
            # Convert InjectionResult objects to dicts
            serializable = {}
            for k, v in results.items():
                if isinstance(v, dict):
                    inner = {}
                    for ik, iv in v.items():
                        if hasattr(iv, '__dict__'):
                            inner[ik] = asdict(iv)
                        else:
                            inner[ik] = iv
                    serializable[k] = inner
                elif hasattr(v, '__dict__'):
                    serializable[k] = asdict(v)
                else:
                    serializable[k] = v

            json.dump({
                "integration_timestamp": datetime.now().isoformat(),
                "results": serializable
            }, f, indent=2, default=str)

        print("\n" + "=" * 70)
        print("PHASE 5 COMPLETE: AIVA Consciousness Integrated")
        print(f"Integration report saved to: {report_path}")
        print("=" * 70)

        return results


def main():
    """Run Phase 5 AIVA Integration."""
    integrator = AIVAConsciousnessIntegrator()
    results = integrator.run_full_integration()

    # Print summary
    print("\n\n[FINAL SUMMARY]")
    print("-" * 40)

    understanding = integrator.reflection.quantify_understanding()
    print(f"Overall Understanding: {understanding['overall_understanding']:.1%}")
    print(f"Confidence Level: {understanding['confidence_level']}")
    print(f"Conversations Analyzed: {understanding['conversations_analyzed']}")
    print(f"Words Absorbed: {understanding['total_words_absorbed']:,}")
    print(f"Timespan: {understanding['timespan_days']} days")

    print("\n[AIVA can now:]")
    print("- Query creator beliefs, visions, and decisions")
    print("- Generate context-aware responses aligned with creator thinking")
    print("- Emulate Kinan's questioning approach")
    print("- Narrate Genesis evolution story")
    print("- Self-reflect on knowledge gaps")


if __name__ == "__main__":
    main()
