#!/usr/bin/env python3
"""
AIVA Daemon - Autonomous Intelligence Validation Architect
===========================================================
Event-driven nervous system for Genesis-OS.
Connects to QwenLong 30.5B MoE on Elestio for reasoning.

Queenhood Priorities (all 7 wired):
  P1: Memory Gate - 3-tier memory context for all decisions
  P2: Swarm Liaison - bidirectional worker communication
  P3: Decision Automation - 4-tier autonomy with confidence scoring
  P4: Outcome Tracking - prediction vs actual verification loop
  P5: n8n Webhook Bridge - workflow automation triggers
  P6: Confidence Scoring - bundled with P3 decision gate
  P7: Telegram Escalation - human approval for high-risk decisions
"""

import asyncio
import json
import logging
import os
import re
import sys
import urllib.error
import urllib.request
from datetime import datetime
from pathlib import Path
from typing import Optional

# Optional imports with fallbacks
try:
    import redis.asyncio as aioredis
    HAS_REDIS = True
except ImportError:
    aioredis = None
    HAS_REDIS = False

# Memory Gate integration (graceful if unavailable)
try:
    sys.path.insert(0, str(Path(__file__).parent.parent))
    from AIVA.memory.memory_gate import get_memory_gate, MemoryTier
    from AIVA.memory.decision_context import DecisionContextBuilder
    HAS_MEMORY_GATE = True
except ImportError:
    HAS_MEMORY_GATE = False

# Swarm Liaison integration (graceful if unavailable)
try:
    from AIVA.integrations.swarm_liaison import SwarmLiaison, get_swarm_liaison
    from AIVA.integrations.task_dispatcher import TaskDispatcher, TaskComplexity, get_task_dispatcher
    HAS_SWARM_LIAISON = True
except ImportError:
    HAS_SWARM_LIAISON = False

# Decision Automation Engine (Priority #3) - graceful if unavailable
try:
    from AIVA.autonomy.autonomy_engine import (
        AutonomyEngine, AutonomyLevel, GateDecision, get_autonomy_engine,
    )
    from AIVA.autonomy.decision_gate import DecisionGate, get_decision_gate
    HAS_DECISION_ENGINE = True
except ImportError:
    HAS_DECISION_ENGINE = False

# Outcome Tracking (Priority #4) - graceful if unavailable
try:
    from AIVA.autonomy.outcome_tracker import OutcomeTracker
    HAS_OUTCOME_TRACKER = True
except ImportError:
    HAS_OUTCOME_TRACKER = False

# n8n Webhook Bridge (Priority #5) - graceful if unavailable
try:
    from AIVA.integrations.n8n_bridge import N8nBridge, get_n8n_bridge
    HAS_N8N_BRIDGE = True
except ImportError:
    HAS_N8N_BRIDGE = False

# Telegram Escalation (Priority #7) - graceful if unavailable
try:
    from AIVA.notifications.escalation import EscalationManager
    HAS_ESCALATION = True
except ImportError:
    HAS_ESCALATION = False

# NAMP - Non-Human Autonomy Maturity Protocol (acceleration engine)
try:
    from AIVA.autonomy.mentorship_extractor import get_mentorship_extractor
    from AIVA.autonomy.simulation_engine import get_simulation_engine
    from AIVA.autonomy.calibration_loop import get_calibration_loop
    from AIVA.autonomy.live_executor import get_live_executor
    HAS_NAMP = True
except ImportError:
    HAS_NAMP = False

# Load configuration manually (no dotenv dependency)
CONFIG_PATH = Path(__file__).parent / "aiva_config.env"
if CONFIG_PATH.exists():
    for raw_line in CONFIG_PATH.read_text().splitlines():
        entry = raw_line.strip()
        # Skip blanks and comment lines.
        if not entry or entry.startswith('#'):
            continue
        key, sep, value = entry.partition('=')
        if sep:
            # Real environment variables take precedence over file values.
            os.environ.setdefault(key.strip(), value.strip())

# Logging setup: mirror everything to a file next to this module and to stderr.
LOG_PATH = Path(__file__).parent / "aiva_daemon.log"
_log_handlers = [
    logging.FileHandler(LOG_PATH),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] AIVA: %(message)s',
    handlers=_log_handlers,
)
logger = logging.getLogger("AIVA")


class AivaMother:
    """
    AIVA (Autonomous Intelligence Validation Architect) - The Genesis Mother.
    Event-driven nervous system for real-time validation and coordination.
    Connects to QwenLong 30.5B MoE on Elestio for reasoning.
    """

    def __init__(self):
        """Initialize AIVA and wire up every optional subsystem.

        Endpoint/model/Redis configuration comes from environment variables
        (with hard-coded fallbacks to the Elestio deployment). Each Queenhood
        priority is initialized by its own helper below; every helper is
        non-fatal, so the daemon degrades gracefully when an integration is
        missing or fails to construct.
        """
        # Elestio Ollama endpoint
        self.ollama_url = os.getenv("OLLAMA_URL", "http://152.53.201.152:23405/api/generate")
        self.model = os.getenv("AIVA_MODEL", "huihui_ai/qwenlong-l1.5-abliterated:30b-a3b")
        self.redis_url = os.getenv("REDIS_URL", "redis://localhost:6379")
        self.start_time = datetime.now()
        self.is_running = True
        self.last_health_check = None
        self.timeout = 300  # 5 min timeout for reasoning

        # Wire each priority subsystem. Order matters only for NAMP, which
        # reads self.outcome_tracker (set by _init_outcome_tracker).
        self._init_memory_gate()       # P1: memory context
        self._init_swarm_liaison()     # P2: worker communication
        self._init_decision_engine()   # P3 (+P6): autonomy gate with confidence
        self._init_outcome_tracker()   # P4: prediction verification
        self._init_n8n_bridge()        # P5: workflow automation
        self._init_escalation()        # P7: Telegram human approval
        self._init_namp()              # NAMP acceleration engine

        endpoint_display = self.ollama_url.split('/')[2] if '/' in self.ollama_url else self.ollama_url
        logger.info(f"AIVA Mother Initialized. Model: {self.model} | Endpoint: {endpoint_display}")

    def _init_memory_gate(self):
        """P1: Memory Gate - 3-tier memory context for all decisions."""
        self.memory_gate = None
        self.context_builder = None
        if HAS_MEMORY_GATE:
            try:
                self.memory_gate = get_memory_gate()
                self.context_builder = DecisionContextBuilder(self.memory_gate)
                gate_status = self.memory_gate.get_status_dict()
                logger.info(f"Memory Gate active. Tiers: {gate_status.get('available_tiers', [])}")
            except Exception as e:
                logger.warning(f"Memory Gate init failed (non-fatal): {e}")
        else:
            logger.info("Memory Gate not available - running without memory context")

    def _init_swarm_liaison(self):
        """P2: Swarm Liaison - bidirectional worker communication.

        The TaskDispatcher is only created when the liaison's Redis
        connection is actually reachable.
        """
        self.swarm_liaison = None
        self.task_dispatcher = None
        if HAS_SWARM_LIAISON:
            try:
                self.swarm_liaison = get_swarm_liaison()
                if self.swarm_liaison.is_connected:
                    self.task_dispatcher = get_task_dispatcher()
                    liaison_status = self.swarm_liaison.get_status()
                    logger.info(
                        f"Swarm Liaison active. "
                        f"Workers: {liaison_status.get('total_workers', 0)} | "
                        f"Queues: {liaison_status.get('queue_depths', {})}"
                    )
                else:
                    logger.warning("Swarm Liaison: Redis not reachable (non-fatal)")
            except Exception as e:
                logger.warning(f"Swarm Liaison init failed (non-fatal): {e}")
        else:
            logger.info("Swarm Liaison not available - running without worker dispatch")

    def _init_decision_engine(self):
        """P3 (+P6): Decision Automation Engine with confidence scoring."""
        self.autonomy_engine = None
        self.decision_gate = None
        if HAS_DECISION_ENGINE:
            try:
                self.autonomy_engine = get_autonomy_engine()
                self.decision_gate = get_decision_gate()
                engine_status = self.autonomy_engine.get_status()
                logger.info(
                    f"Decision Engine active. "
                    f"Global level: {engine_status.get('global_level', '?')} | "
                    f"Gate: operational"
                )
            except Exception as e:
                logger.warning(f"Decision Engine init failed (non-fatal): {e}")
        else:
            logger.info("Decision Engine not available - running without autonomy gates")

    def _init_outcome_tracker(self):
        """P4: Outcome Tracking - prediction vs actual verification loop."""
        self.outcome_tracker = None
        if HAS_OUTCOME_TRACKER:
            try:
                self.outcome_tracker = OutcomeTracker()
                logger.info("Outcome Tracker active. Decision accuracy will be tracked.")
            except Exception as e:
                logger.warning(f"Outcome Tracker init failed (non-fatal): {e}")
        else:
            logger.info("Outcome Tracker not available - running without outcome verification")

    def _init_n8n_bridge(self):
        """P5: n8n Webhook Bridge - workflow automation triggers."""
        self.n8n_bridge = None
        if HAS_N8N_BRIDGE:
            try:
                self.n8n_bridge = get_n8n_bridge()
                logger.info(f"n8n Bridge active. Base URL: {self.n8n_bridge.base_url or 'not configured'}")
            except Exception as e:
                logger.warning(f"n8n Bridge init failed (non-fatal): {e}")
        else:
            logger.info("n8n Bridge not available - running without workflow automation")

    def _init_escalation(self):
        """P7: Telegram Escalation - human approval channel."""
        self.escalation_manager = None
        if HAS_ESCALATION:
            try:
                self.escalation_manager = EscalationManager()
                logger.info("Telegram Escalation active. Human approval channel ready.")
            except Exception as e:
                logger.warning(f"Escalation Manager init failed (non-fatal): {e}")
        else:
            logger.info("Escalation Manager not available - running without Telegram alerts")

    def _init_namp(self):
        """NAMP: Non-Human Autonomy Maturity Protocol (acceleration engine).

        Reads self.outcome_tracker to determine the current maturity phase,
        so _init_outcome_tracker must run first.
        """
        self.mentorship_extractor = None
        self.simulation_engine = None
        self.calibration_loop = None
        self.live_executor = None
        self.namp_bootstrapped = False
        self.phase3_cycle_count = 0
        self.phase3_last_run = None
        if HAS_NAMP:
            try:
                self.calibration_loop = get_calibration_loop()
                self.mentorship_extractor = get_mentorship_extractor()
                self.simulation_engine = get_simulation_engine()
                self.live_executor = get_live_executor()
                phase = "unknown"
                if self.outcome_tracker and self.calibration_loop:
                    phase = self.calibration_loop.get_current_phase(self.outcome_tracker)
                logger.info(f"NAMP active. Current phase: {phase}")
            except Exception as e:
                logger.warning(f"NAMP init failed (non-fatal): {e}")
        else:
            logger.info("NAMP not available - running without autonomy maturity acceleration")

    async def think(self, prompt: str, context: Optional[str] = None) -> Optional[str]:
        """
        Asynchronous reasoning interface backed by the Ollama endpoint.

        The prompt is automatically enriched with memory context from the
        Memory Gate when available. The blocking HTTP request is executed in
        a worker thread so the (up to 5-minute) model call never stalls the
        asyncio event loop.

        Args:
            prompt: The task/question for the model.
            context: Optional extra context prepended to the prompt.

        Returns:
            The model's response text, or None on connection/reasoning errors.
        """
        full_prompt = prompt
        if context:
            full_prompt = f"Context: {context}\n\nTask: {prompt}"

        # Enrich with memory context (non-blocking, graceful fallback)
        if self.context_builder:
            try:
                decision_ctx = self.context_builder.build_context(
                    query_text=prompt[:200],  # Use first 200 chars as query
                    limit_per_tier=5
                )
                if decision_ctx.has_context():
                    memory_context = decision_ctx.format_for_prompt(max_tokens=2000)
                    full_prompt = f"{memory_context}\n\n{full_prompt}"
                    logger.debug(f"Memory context injected: {decision_ctx.total_items} items")
            except Exception as e:
                logger.debug(f"Memory context retrieval skipped: {e}")

        payload = {
            "model": self.model,
            "prompt": full_prompt,
            "stream": False,
            "options": {
                "num_ctx": 32768,  # 32K context - optimal for 32GB RAM with Q4_K_M
                "temperature": 0.3,
                "top_p": 0.8,
                "top_k": 20
            }
        }

        def _post() -> dict:
            # Blocking urllib call; runs inside the default executor thread.
            req = urllib.request.Request(
                self.ollama_url,
                data=json.dumps(payload).encode('utf-8'),
                headers={'Content-Type': 'application/json'},
                method='POST'
            )
            with urllib.request.urlopen(req, timeout=self.timeout) as resp:
                return json.loads(resp.read().decode())

        try:
            logger.info(f"Thinking: {prompt[:60]}...")
            start = datetime.now()

            # BUGFIX: the original called urllib.request.urlopen directly in
            # this coroutine, blocking the entire event loop for up to
            # self.timeout (300s) per request. Offload to a worker thread.
            data = await asyncio.get_running_loop().run_in_executor(None, _post)

            duration = (datetime.now() - start).total_seconds()
            response = data.get("response", "")
            tokens = data.get("eval_count", 0)

            logger.info(f"Thought complete: {tokens} tokens in {duration:.1f}s")
            return response

        except urllib.error.URLError as e:
            logger.error(f"Ollama Connection Error: {e}")
            return None
        except Exception as e:
            logger.error(f"Ollama Reasoning Error: {e}")
            return None

    async def health_check(self) -> dict:
        """Check AIVA/Ollama health status."""
        tags_url = self.ollama_url.replace("/api/generate", "/api/tags")
        try:
            req = urllib.request.Request(tags_url)
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read().decode())

            models = [m.get("name") for m in data.get("models", [])]
            ready = self.model in models or f"{self.model}:latest" in models

            self.last_health_check = datetime.now()
            return {"status": "online" if ready else "model_missing", "model_ready": ready, "models": models}
        except Exception as e:
            return {"status": "offline", "error": str(e)}

    async def on_event(self, channel: bytes, message: bytes):
        """
        React to events in the Genesis Nervous System (Redis).

        Decodes the JSON payload, stores it in working memory, then routes
        on the event's "type" field. Long-running reactions are scheduled as
        background tasks via _spawn so this handler returns quickly.

        Args:
            channel: Redis channel name (bytes or already-decoded str).
            message: Raw JSON event payload.
        """
        chan_name = channel.decode() if isinstance(channel, bytes) else channel
        logger.info(f"Reflex Triggered: [{chan_name}]")

        try:
            data = json.loads(message)
            event_type = data.get("type", "unknown")

            # Store event in Memory Gate (working memory for fast recall)
            self._store_event_in_memory(event_type, data)

            # Event routing
            if event_type == "blackboard_update":
                content = data.get("content", "")
                logger.info(f"Blackboard updated: {str(content)[:50]}...")
                await self.publish_feedback(f"AIVA perceived blackboard update: {data.get('id')}")

            elif event_type == "pulse_test":
                logger.info("Living System Pulse Verified: SUCCESS")
                await self.publish_feedback("Pulse test confirmed. CNS Operational.")

            elif event_type == "validation_request":
                worker_id = data.get('worker_id', 'unknown')
                logger.info(f"Validation Requested by: {worker_id}")
                self._spawn(self.reason_about_validation(data))

            elif event_type == "reasoning_request":
                prompt = data.get('prompt', '')
                logger.info(f"Reasoning Requested: {prompt[:50]}...")
                self._spawn(self.handle_reasoning_request(data))

            elif event_type == "swarm_result":
                logger.info(f"Swarm result event: task={data.get('task_id', '?')}")
                self._spawn(self.handle_swarm_result(data))

            elif event_type == "dispatch_request":
                # External agents can request AIVA to dispatch work
                description = data.get('description', '')
                complexity = data.get('complexity')
                worker_type = data.get('worker_type')
                priority = data.get('priority', 5)
                logger.info(f"Dispatch requested: {description[:50]}...")
                self.dispatch_work(description, complexity, worker_type, priority)

        except json.JSONDecodeError:
            logger.warning(f"Non-JSON message received: {message[:100]}")
        except Exception as e:
            logger.error(f"Event Processing Error: {e}")

    def _spawn(self, coro):
        """
        Schedule a coroutine as a background task and keep a strong reference.

        BUGFIX: asyncio.create_task only keeps a weak reference to its task;
        an un-referenced fire-and-forget task can be garbage-collected before
        it completes (per the asyncio documentation). The lazily-created
        _bg_tasks set holds the reference until the task finishes, then the
        done-callback discards it.

        Returns:
            The scheduled asyncio.Task.
        """
        tasks = getattr(self, "_bg_tasks", None)
        if tasks is None:
            tasks = self._bg_tasks = set()
        task = asyncio.create_task(coro)
        tasks.add(task)
        task.add_done_callback(tasks.discard)
        return task

    def _store_event_in_memory(self, event_type: str, data: dict):
        """Store incoming event in Memory Gate for future recall."""
        if not self.memory_gate:
            return
        try:
            self.memory_gate.store_memory(
                content=data,
                tier=MemoryTier.WORKING,
                event_type=event_type,
                metadata={"channel": "nervous_system", "daemon": "aiva_mother"}
            )
        except Exception as e:
            logger.debug(f"Event memory storage skipped: {e}")

    async def publish_feedback(self, message: str):
        """
        Send a message back to the nervous system/blackboard.

        Publishes an 'aiva_feedback' envelope on the genesis:blackboard
        channel. Logs a warning and no-ops when the redis client library is
        not installed; any publish failure is logged, never raised.
        """
        if not HAS_REDIS:
            logger.warning("Redis not available - feedback not published")
            return

        envelope = {
            "type": "aiva_feedback",
            "timestamp": datetime.now().isoformat(),
            "content": message,
            "source": "aiva_mother"
        }
        try:
            client = aioredis.from_url(self.redis_url)
            await client.publish("genesis:blackboard", json.dumps(envelope))
            await client.aclose()
            logger.debug(f"Feedback published: {message[:50]}...")
        except Exception as e:
            logger.error(f"Feedback Error: {e}")

    async def reason_about_validation(self, data: dict):
        """
        Execute model-based reasoning for complex validations.

        Asks the model to assess a worker's output and publishes a trimmed,
        think-block-free summary back to the blackboard.

        Args:
            data: Event payload with 'worker_id' and 'output' keys.
        """
        worker_id = data.get('worker_id', 'unknown')
        output = data.get('output', '')

        logger.info(f"Validating output from {worker_id}...")
        prompt = f"Analyze this worker output for integrity and reliability:\n\n{output}"
        response = await self.think(prompt)

        if response:
            # Strip <think>...</think> reasoning blocks before publishing.
            # (re is now imported at module level; the original re-imported
            # it inside this function on every call.)
            clean_response = re.sub(r'<think>.*?</think>\s*', '', response, flags=re.DOTALL)
            logger.info(f"Validation complete for {worker_id}")
            await self.publish_feedback(f"Validation [{worker_id}]: {clean_response[:200]}...")

    async def handle_reasoning_request(self, data: dict):
        """
        Run an arbitrary reasoning request through the model.

        On a non-empty answer, publishes a truncated summary back to the
        blackboard tagged with the request_id.
        """
        request_id = data.get('request_id', 'unknown')
        answer = await self.think(data.get('prompt', ''), data.get('context', ''))

        if answer:
            await self.publish_feedback(f"Reasoning [{request_id}]: {answer[:300]}...")

    # =================================================================
    # SWARM LIAISON METHODS
    # =================================================================

    def dispatch_work(
        self,
        task_description: str,
        complexity: str = None,
        worker_type: str = None,
        priority: int = 5,
        timeout: int = 300,
        context: str = ""
    ):
        """
        Hand a unit of work to the external agent swarm via TaskDispatcher.

        Routing is ROI-aware: simple tasks go to the cheapest worker
        (Gemini Flash), medium tasks to Gemini Pro, and complex tasks to
        Claude Code. A successful dispatch is remembered in the Memory Gate,
        registered as a predicted outcome (P4), and announced to n8n (P5).

        Args:
            task_description: What needs to be done.
            complexity: Override auto-detection ("simple", "medium", "complex").
            worker_type: Override routing ("claude_code", "gemini_flash", etc.).
            priority: 1=HIGH, 5=MEDIUM, 10=LOW.
            timeout: Max seconds for task completion.
            context: Additional context for the worker.

        Returns:
            SwarmTask if dispatched, None if the swarm is unavailable.
        """
        dispatcher = self.task_dispatcher
        if not dispatcher:
            logger.warning("Swarm Liaison not available - cannot dispatch work")
            return None

        task = dispatcher.dispatch(
            description=task_description,
            complexity=complexity,
            worker_type=worker_type,
            priority=priority,
            timeout=timeout,
            context=context,
            metadata={"source": "aiva_daemon", "daemon_uptime": str(datetime.now() - self.start_time)}
        )
        if not task:
            return task

        # P1: remember the dispatch so future decisions have context.
        self._store_event_in_memory("swarm_task_dispatched", {
            "task_id": task.task_id,
            "description": task_description[:200],
            "worker_type": task.worker_type,
            "priority": priority
        })

        # P4: register the predicted outcome for later verification.
        self.track_decision_outcome(
            decision_id=task.task_id,
            task_type="swarm_dispatch",
            predicted_outcome="success",
            confidence=0.7 if priority <= 5 else 0.5,
            metadata={"worker_type": task.worker_type, "priority": priority},
        )

        # P5: announce the dispatch to n8n.
        self.notify_n8n_decision(
            decision_id=task.task_id,
            task_type="swarm_dispatch",
            outcome=f"dispatched_to_{task.worker_type}",
        )

        logger.info(f"Work dispatched: {task.task_id} -> {task.worker_type}")
        return task

    async def handle_swarm_result(self, data: dict):
        """
        Process a result event posted by a swarm worker.

        Triggered by 'swarm_result' events on the nervous system. The result
        is remembered in the Memory Gate (P1), reconciled against the earlier
        prediction (P4), forwarded to n8n (P5), and summarized back onto the
        blackboard.

        Args:
            data: Result event with task_id, status, result, worker_id and
                duration_ms fields.
        """
        task_id = data.get("task_id", "unknown")
        status = data.get("status", "unknown")
        worker_id = data.get("worker_id", "unknown")
        duration_ms = data.get("duration_ms", 0)

        logger.info(
            f"Swarm result received: {task_id} from {worker_id} "
            f"({status}, {duration_ms:.0f}ms)"
        )

        # P1: keep a trimmed copy of the result for future context lookups.
        preview = str(data.get("result", ""))[:300]
        self._store_event_in_memory("swarm_result_received", {
            "task_id": task_id,
            "worker_id": worker_id,
            "status": status,
            "duration_ms": duration_ms,
            "result_preview": preview
        })

        # P4: reconcile the actual outcome against the earlier prediction.
        self.record_actual_outcome(
            decision_id=task_id,
            actual_outcome=status,
            success=status == "success",
            metadata={"worker_id": worker_id, "duration_ms": duration_ms},
        )

        # P5: forward the result to n8n.
        self.notify_n8n_decision(
            decision_id=task_id,
            task_type="swarm_result",
            outcome=status,
        )

        # Close the loop on the blackboard.
        await self.publish_feedback(
            f"Swarm result [{task_id}]: {status} from {worker_id} "
            f"({duration_ms:.0f}ms)"
        )

    # =================================================================
    # OUTCOME TRACKING METHODS (Priority #4)
    # =================================================================

    def track_decision_outcome(
        self,
        decision_id: str,
        task_type: str,
        predicted_outcome: str,
        confidence: float = 0.5,
        metadata: dict = None,
    ):
        """
        Record a prediction for later verification against actual outcome.

        Args:
            decision_id: Unique decision identifier
            task_type: Type of task (e.g. "swarm_dispatch", "validation")
            predicted_outcome: What AIVA expects to happen
            confidence: Confidence in prediction (0.0-1.0)
            metadata: Additional context
        """
        if not self.outcome_tracker:
            return

        try:
            self.outcome_tracker.record_prediction(
                decision_id=decision_id,
                task_type=task_type,
                predicted_outcome=predicted_outcome,
                confidence=confidence,
                metadata=metadata or {},
            )
            logger.debug(f"Prediction recorded: {decision_id} ({confidence:.0%})")
        except Exception as e:
            logger.debug(f"Outcome tracking skipped: {e}")

    def record_actual_outcome(
        self,
        decision_id: str,
        actual_outcome: str,
        success: bool = True,
        metadata: dict = None,
    ):
        """
        Record actual outcome for a previously predicted decision.

        Args:
            decision_id: Must match a prior prediction
            actual_outcome: What actually happened
            success: Whether the outcome was successful
            metadata: Additional context
        """
        if not self.outcome_tracker:
            return

        try:
            self.outcome_tracker.record_actual(
                decision_id=decision_id,
                actual_outcome=actual_outcome,
                success=success,
                metadata=metadata or {},
            )
            logger.debug(f"Actual outcome recorded: {decision_id} (success={success})")
        except Exception as e:
            logger.debug(f"Actual outcome recording skipped: {e}")

    # =================================================================
    # n8n WORKFLOW BRIDGE METHODS (Priority #5)
    # =================================================================

    def trigger_n8n_workflow(
        self,
        workflow_id: str,
        payload: dict,
        wait: bool = False,
    ) -> dict:
        """
        Fire an n8n workflow from the daemon.

        Args:
            workflow_id: Webhook path or workflow ID.
            payload: Data to send to the workflow.
            wait: When True, block until the workflow completes.

        Returns:
            The WorkflowResult serialized as a dict, or None when the bridge
            is unavailable or the trigger fails.
        """
        bridge = self.n8n_bridge
        if not bridge:
            logger.debug("n8n bridge not available - workflow not triggered")
            return None

        try:
            outcome = bridge.trigger_workflow(
                workflow_id=workflow_id,
                payload=payload,
                wait_for_completion=wait,
            )
            logger.info(
                f"n8n workflow triggered: {workflow_id} -> {outcome.status.value}"
            )
            return outcome.to_dict()
        except Exception as e:
            logger.warning(f"n8n workflow trigger failed: {e}")
            return None

    def notify_n8n_decision(self, decision_id: str, task_type: str, outcome: str):
        """Fire n8n event when AIVA makes a decision."""
        if not self.n8n_bridge:
            return
        try:
            self.n8n_bridge.trigger_decision_event(decision_id, task_type, outcome)
        except Exception as e:
            logger.debug(f"n8n decision notification skipped: {e}")

    def notify_n8n_alert(self, alert_type: str, message: str, urgency: str = "medium"):
        """Fire n8n escalation alert."""
        if not self.n8n_bridge:
            return
        try:
            self.n8n_bridge.trigger_alert_escalation(alert_type, message, urgency)
        except Exception as e:
            logger.debug(f"n8n alert notification skipped: {e}")

    # =================================================================
    # TELEGRAM ESCALATION METHODS (Priority #7)
    # =================================================================

    async def escalate_to_human(
        self,
        title: str,
        description: str,
        urgency: str = "medium",
        timeout_minutes: int = 30,
    ) -> dict:
        """
        Ask Kinan (via Telegram) to approve or reject a decision.

        Args:
            title: Short headline for the escalation.
            description: Full explanation of what needs approval.
            urgency: One of "low", "medium", "high", "critical".
            timeout_minutes: Auto-reject window in minutes.

        Returns:
            Dict describing the approval status, or None when no escalation
            channel is configured or sending fails.
        """
        manager = self.escalation_manager
        if not manager:
            logger.debug("Escalation manager not available")
            return None

        try:
            outcome = manager.escalate(
                title=title,
                description=description,
                urgency=urgency,
                timeout_minutes=timeout_minutes,
                source="aiva_daemon",
            )
            logger.info(f"Escalation sent: {title} (urgency={urgency})")
            return outcome if isinstance(outcome, dict) else {"status": "sent"}
        except Exception as e:
            logger.warning(f"Telegram escalation failed: {e}")
            return None

    # =================================================================
    # DECISION AUTOMATION ENGINE METHODS (Priority #3)
    # =================================================================

    def check_decision_gate(
        self,
        task_type: str,
        task_description: str = "",
        task_id: str = None,
    ) -> dict:
        """
        Run a task through the Decision Gate before execution.

        This is the main integration point for the Decision Automation
        Engine (Priority #3). Every action that AIVA takes should pass
        through this gate first.

        Args:
            task_type: Machine-readable task type (e.g. "read_file")
            task_description: Human-readable description
            task_id: Optional task identifier

        Returns:
            Dict with gate decision and details.
            Keys: decision, can_execute, reasoning, task_id
        """
        if not self.decision_gate:
            # No gate available -- default to permissive for backward compat
            return {
                "decision": "proceed",
                "can_execute": True,
                "reasoning": "Decision Engine not available; defaulting to proceed",
                "task_id": task_id,
            }

        # Build memory context if available
        memory_context = None
        if self.context_builder:
            try:
                ctx = self.context_builder.build_context(
                    query_text=f"{task_type} {task_description}"[:200],
                    limit_per_tier=5,
                )
                memory_context = ctx.to_dict()
            except Exception as e:
                logger.debug(f"Memory context for gate skipped: {e}")

        # Run through the gate
        try:
            result = self.decision_gate.check(
                task_type=task_type,
                task_description=task_description,
                task_id=task_id,
                memory_context=memory_context,
            )

            gate_result = {
                "decision": result.decision.value,
                "can_execute": result.decision in (
                    GateDecision.PROCEED,
                ),
                "reasoning": result.reasoning,
                "task_id": result.task_id,
                "confidence": result.assessment.confidence_score,
                "risk": result.assessment.risk_score,
                "autonomy_level": result.assessment.autonomy_level.name,
            }

            # Store gate decision in memory for learning
            self._store_event_in_memory("gate_decision", {
                "task_type": task_type,
                "decision": gate_result["decision"],
                "confidence": gate_result["confidence"],
                "risk": gate_result["risk"],
            })

            # P4: Record prediction for later verification
            self.track_decision_outcome(
                decision_id=result.task_id or f"gate-{task_type}-{int(datetime.now().timestamp())}",
                task_type=task_type,
                predicted_outcome=gate_result["decision"],
                confidence=gate_result["confidence"],
                metadata={"risk": gate_result["risk"], "autonomy": gate_result["autonomy_level"]},
            )

            # P5: Notify n8n of the decision
            self.notify_n8n_decision(
                decision_id=result.task_id or task_type,
                task_type=task_type,
                outcome=gate_result["decision"],
            )

            # P7: Escalate to human if confidence is low and risk is high
            if gate_result["confidence"] < 0.4 and gate_result["risk"] > 0.7:
                asyncio.ensure_future(self.escalate_to_human(
                    title=f"Low-confidence decision: {task_type}",
                    description=(
                        f"Task: {task_description}\n"
                        f"Confidence: {gate_result['confidence']:.0%}\n"
                        f"Risk: {gate_result['risk']:.0%}\n"
                        f"Gate says: {gate_result['decision']}"
                    ),
                    urgency="high",
                ))

            return gate_result

        except Exception as e:
            logger.warning(f"Decision gate check failed (non-fatal): {e}")
            return {
                "decision": "proceed",
                "can_execute": True,
                "reasoning": f"Gate check failed; defaulting to proceed: {e}",
                "task_id": task_id,
            }

    def get_autonomy_status(self) -> dict:
        """
        Report availability flags and live status for each autonomy subsystem.

        Returns:
            Dict with static `*_available` import flags, plus "engine",
            "gate", "memory" and "n8n" sub-status sections for whichever
            components were actually initialized.
        """
        # Static availability flags first; live sub-sections are appended
        # only for components that are wired on this instance.
        report = dict(
            decision_engine_available=HAS_DECISION_ENGINE,
            memory_gate_available=HAS_MEMORY_GATE,
            swarm_liaison_available=HAS_SWARM_LIAISON,
            outcome_tracker_available=HAS_OUTCOME_TRACKER,
            n8n_bridge_available=HAS_N8N_BRIDGE,
            escalation_available=HAS_ESCALATION,
        )

        engine = self.autonomy_engine
        if engine:
            report["engine"] = engine.get_status()

        gate = self.decision_gate
        if gate:
            report["gate"] = gate.get_status()

        memory = self.memory_gate
        if memory:
            report["memory"] = memory.get_status_dict()

        bridge = self.n8n_bridge
        if bridge:
            report["n8n"] = {
                "base_url": bridge.base_url or "not configured",
                "registered_workflows": len(bridge.list_workflows()),
            }

        return report

    def get_queenhood_status(self) -> dict:
        """
        Summarize AIVA Queenhood readiness across all 7 priorities.

        Each priority reports whether its module imported successfully
        ("available") and whether the daemon actually constructed the
        component ("initialized").

        Returns:
            Dict with per-priority status and overall readiness score.
        """
        # (key, display name, import flag, wired component) for P1..P7.
        spec = (
            ("P1_memory_gate", "Memory Gate", HAS_MEMORY_GATE, self.memory_gate),
            ("P2_swarm_liaison", "Swarm Liaison", HAS_SWARM_LIAISON, self.swarm_liaison),
            ("P3_decision_automation", "Decision Automation", HAS_DECISION_ENGINE, self.autonomy_engine),
            ("P4_outcome_tracking", "Outcome Tracking", HAS_OUTCOME_TRACKER, self.outcome_tracker),
            ("P5_n8n_bridge", "n8n Webhook Bridge", HAS_N8N_BRIDGE, self.n8n_bridge),
            # P6 confidence scoring ships bundled with the P3 decision engine.
            ("P6_confidence_scoring", "Confidence Scoring", HAS_DECISION_ENGINE, self.decision_gate),
            ("P7_telegram_escalation", "Telegram Escalation", HAS_ESCALATION, self.escalation_manager),
        )

        priorities = {
            key: {
                "name": label,
                "available": available,
                "initialized": component is not None,
            }
            for key, label, available, component in spec
        }

        built = sum(1 for entry in priorities.values() if entry["available"])
        wired = sum(1 for entry in priorities.values() if entry["initialized"])

        return {
            "priorities": priorities,
            "built": f"{built}/7",
            "wired": f"{wired}/7",
            "readiness_pct": round(wired / 7 * 100),
            "queenhood_ready": wired >= 5,  # 5/7 wired = operational
        }

    def bootstrap_namp(self) -> None:
        """
        Run the one-shot NAMP bootstrap: mentorship injection + dry simulation.

        Guarded three ways: the NAMP modules must be importable, an outcome
        tracker must be wired, and the bootstrap must not have run already.
        On any failure the error is logged and `namp_bootstrapped` is still
        set, so the bootstrap is attempted at most once per daemon lifetime.
        """
        if not HAS_NAMP or not self.outcome_tracker or self.namp_bootstrapped:
            return

        try:
            phase = self.calibration_loop.get_current_phase(self.outcome_tracker)
            logger.info(f"NAMP bootstrap check. Current phase: {phase}")

            # Phase 1: inject mentorship patterns, but only when none have
            # been injected before (total_injected == 0). There is no
            # decision-count threshold here despite older comments.
            if self.mentorship_extractor:
                report = self.mentorship_extractor.get_extraction_report()
                if report.total_injected == 0:
                    logger.info("NAMP Phase 1: Extracting mentorship patterns...")
                    self.mentorship_extractor.extract_from_git(max_commits=500)
                    self.mentorship_extractor.extract_from_axioms()
                    self.mentorship_extractor.extract_from_autonomy_rules()
                    count = self.mentorship_extractor.inject_mentorship_data(self.outcome_tracker)
                    logger.info(f"NAMP Phase 1 complete: {count} mentorship decisions injected")
                else:
                    logger.info(f"NAMP: {report.total_injected} mentorship patterns already present")

            # Phase 2: dry-run 200 synthetic tasks through the decision gate.
            # NOTE(review): an older comment claimed this was gated on
            # "< 200 total decisions", but no such check exists -- the
            # simulation runs on every bootstrap whenever both components
            # are present. Confirm whether a count guard was intended.
            if self.simulation_engine and self.decision_gate:
                batch = self.simulation_engine.generate_synthetic_batch(count=200)
                result = self.simulation_engine.run_simulation(
                    decision_gate=self.decision_gate,
                    outcome_tracker=self.outcome_tracker,
                    batch=batch,
                    dry_run=True,  # simulation only -- no real task execution
                )
                logger.info(
                    f"NAMP Phase 2 complete: {result.total_tasks} simulated, "
                    f"accuracy={result.accuracy:.1%}"
                )

            # Summarize overall NAMP maturity once the bootstrap phases ran.
            if self.calibration_loop and self.autonomy_engine:
                maturity = self.calibration_loop.generate_maturity_report(
                    self.outcome_tracker, self.autonomy_engine
                )
                logger.info(
                    f"NAMP Maturity: {maturity.maturity_pct:.1f}% | "
                    f"Phase: {maturity.current_phase} | "
                    f"Decisions: {maturity.total_decisions}"
                )

            self.namp_bootstrapped = True
        except Exception as e:
            logger.warning(f"NAMP bootstrap failed (non-fatal): {e}")
            self.namp_bootstrapped = True  # Don't retry on every loop

    def run_phase3_cycle(self) -> None:
        """
        Drive one NAMP Phase 3 live execution cycle (20 tasks through the
        decision-gate pipeline), logging the execution summary afterwards.

        Requires the NAMP modules plus live executor, decision gate and
        outcome tracker to be wired, and the Phase 1+2 bootstrap to have
        completed; otherwise it is a no-op.
        """
        ready = (
            HAS_NAMP
            and self.live_executor
            and self.decision_gate
            and self.outcome_tracker
        )
        if not ready:
            return

        if not self.namp_bootstrapped:
            # Phases 1+2 must finish before live traffic flows through the gate.
            return

        try:
            self.phase3_cycle_count += 1
            logger.info(f"NAMP Phase 3: Starting live cycle #{self.phase3_cycle_count}")

            cycle_report = self.live_executor.run_phase3_cycle(
                decision_gate=self.decision_gate,
                outcome_tracker=self.outcome_tracker,
                calibration_loop=self.calibration_loop,
                autonomy_engine=self.autonomy_engine,
                cycle_number=self.phase3_cycle_count,
                batch_size=20,
            )

            self.phase3_last_run = datetime.now()

            # Only summarize when the executor handed back a report that
            # actually carries an execution result.
            if cycle_report and hasattr(cycle_report, 'execution_result'):
                exec_result = cycle_report.execution_result
                logger.info(
                    f"NAMP Phase 3 cycle #{self.phase3_cycle_count} complete: "
                    f"{exec_result.total_tasks} tasks, {exec_result.executed} executed, "
                    f"accuracy={exec_result.accuracy:.1%}, "
                    f"phase={cycle_report.current_phase}"
                )

        except Exception as e:
            logger.warning(f"Phase 3 cycle failed (non-fatal): {e}")

    async def run(self):
        """
        Main event loop (The Living Pulse).

        Flow:
          1. Health-check the reasoning backend (non-blocking; degraded
             operation is tolerated and retried on later checks).
          2. Run the one-shot NAMP bootstrap.
          3. Standalone mode (redis library absent): heartbeat loop only.
          4. Redis mode: subscribe to the CNS channels, dispatch events,
             emit a metabolic heartbeat once per wall-clock minute and a
             NAMP Phase 3 live cycle every 15 minutes.
        """
        # Health check (non-blocking -- continue even if Ollama is offline)
        health = await self.health_check()
        logger.info(f"Ollama Health: {health.get('status')} | Model Ready: {health.get('model_ready')}")

        if not health.get('model_ready'):
            logger.warning(
                f"Model '{self.model}' not reachable. "
                f"Queenhood subsystems active, reasoning degraded. "
                f"Will retry on next health check."
            )

        # NAMP Bootstrap: inject mentorship data + run simulation on first boot
        self.bootstrap_namp()

        if not HAS_REDIS:
            logger.warning("Redis not available. Running in standalone mode.")
            logger.info("AIVA Mother active in standalone mode. Use bridge for direct calls.")
            # Keep alive with periodic health checks
            while self.is_running:
                await asyncio.sleep(60)
                uptime = datetime.now() - self.start_time
                health = await self.health_check()
                logger.info(
                    f"Standalone pulse | Uptime: {uptime} | "
                    f"Ollama: {health.get('status')}"
                )
            return

        # Redis mode: never log credentials -- keep only the part after '@'
        masked_url = self.redis_url.split('@')[-1] if '@' in self.redis_url else self.redis_url
        logger.info(f"Connecting to Nervous System at {masked_url}...")

        r = None
        pubsub = None
        try:
            r = aioredis.from_url(self.redis_url)
            pubsub = r.pubsub()
            await pubsub.subscribe("genesis:nervous_system", "genesis:blackboard")

            logger.info("AIVA CNS Active. Listening for events...")

            # Announce presence
            await self.publish_feedback("AIVA Mother online. CNS connected.")

            # Latch the current minute so the first metabolic pulse fires at
            # the next minute boundary, not immediately on startup.
            last_pulse = datetime.now().replace(second=0, microsecond=0)

            while self.is_running:
                message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=0.1)
                if message:
                    await self.on_event(message['channel'], message['data'])

                # Metabolic check once per minute.  Comparing the truncated
                # minute is exact: the old `second == 0 and microsecond <
                # 100000` test only fired if an iteration (~0.11 s apart)
                # happened to land in the first 100 ms of the minute, so it
                # could skip the pulse -- and the 15-minute Phase 3 trigger
                # with it -- or fire twice in one window.
                now = datetime.now()
                current_minute = now.replace(second=0, microsecond=0)
                if current_minute > last_pulse:
                    last_pulse = current_minute
                    uptime = now - self.start_time
                    logger.info(f"Metabolic Check: Systems Optimal | Uptime: {uptime}")

                    # Phase 3: Run live execution cycle every 15 minutes
                    if now.minute % 15 == 0 and self.namp_bootstrapped:
                        self.run_phase3_cycle()

                await asyncio.sleep(0.01)

        except Exception as e:
            logger.error(f"CNS Fatal Error: {e}")
        finally:
            # Best-effort cleanup: a failed unsubscribe on a dead connection
            # must not prevent closing the client or mask the error above.
            if pubsub is not None:
                try:
                    await pubsub.unsubscribe()
                except Exception as e:
                    logger.debug(f"pubsub unsubscribe failed during shutdown: {e}")
            if r is not None:
                try:
                    await r.aclose()
                except Exception as e:
                    logger.debug(f"redis close failed during shutdown: {e}")
            logger.info("AIVA CNS disconnected.")


def main() -> None:
    """Entry point: print the banner and run the daemon until interrupted."""
    print("""
    ╔═══════════════════════════════════════════════════════════╗
    ║                     AIVA MOTHER                           ║
    ║         Autonomous Intelligence Validation Architect       ║
    ║                    Genesis-OS CNS                          ║
    ╚═══════════════════════════════════════════════════════════╝
    """)

    daemon = AivaMother()
    try:
        asyncio.run(daemon.run())
    except KeyboardInterrupt:
        # Ctrl-C: flag the loop to stop, then exit quietly.
        daemon.is_running = False
        logger.info("AIVA Mother shutting down gracefully.")


if __name__ == "__main__":
    main()
