#!/usr/bin/env python3
"""
Genesis Heartbeat - The Circulatory System
===========================================
Connects all cognitive components into a single living loop.

Implements the "Titans Framework" from Active Cognitive Architectures:
- Real-time parameter updating via Axiom generation
- Memory consolidation every heartbeat
- Surprise-weighted learning signals
- Blackboard → Cortex → Axioms cycle

Based on Gemini 2.5 conversation on Active Cognitive Architectures (Jan 2026)

Usage:
    python genesis_heartbeat.py start     # Start heartbeat daemon
    python genesis_heartbeat.py pulse     # Single heartbeat cycle
    python genesis_heartbeat.py status    # Check heartbeat status
    python genesis_heartbeat.py axioms    # View current axioms
"""

import json
import time
import hashlib
import threading
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict, field
from enum import Enum

# Import Genesis components
try:
    from blackboard import Blackboard, EntryType, TaskStatus, BlackboardEntry
    from genesis_memory_cortex import MemoryCortex, MemoryTier, Memory
    from surprise_memory import MemorySystem
    from meta_agent import MetaAgent
    from reflexion import reflexion_loop
    from secrets_loader import get_anthropic_api_key, get_gemini_api_key
    from circadian_scheduler import CircadianScheduler, TaskType
except ImportError:
    # Fallback for different import contexts
    import sys
    sys.path.insert(0, str(Path(__file__).parent))
    from core.blackboard import Blackboard, EntryType, TaskStatus, BlackboardEntry
    from core.genesis_memory_cortex import MemoryCortex, MemoryTier, Memory
    from core.surprise_memory import MemorySystem
    from core.meta_agent import MetaAgent
    from core.reflexion import reflexion_loop
    from core.secrets_loader import get_anthropic_api_key, get_gemini_api_key
    from core.circadian_scheduler import CircadianScheduler, TaskType

# Import atomic I/O for safe state persistence
try:
    from atomic_io import atomic_json_write, safe_json_read
    ATOMIC_IO_AVAILABLE = True
except ImportError:
    ATOMIC_IO_AVAILABLE = False
    atomic_json_write = None
    safe_json_read = None

# Import logging
try:
    from logging_config import get_logger
    logger = get_logger("genesis.heartbeat")
except ImportError:
    logger = None


class SurpriseLevel(Enum):
    """Surprise classification levels.

    Buckets a total_surprise score in [0, 1] into four named bands.
    The thresholds shown in the comments are applied in
    EnhancedSurpriseSystem.observe(), not here.
    """
    MUNDANE = "mundane"       # < 0.3 - Expected, routine
    NOTABLE = "notable"       # 0.3-0.6 - Worth noting
    SURPRISING = "surprising" # 0.6-0.8 - Violated expectations
    SHOCKING = "shocking"     # > 0.8 - Major deviation


@dataclass
class SurpriseEvent:
    """An event with calculated surprise metrics.

    Produced by EnhancedSurpriseSystem.observe(). All component scores
    are floats in [0, 1]; total_surprise is their weighted combination.
    """
    event_id: str    # short sha256-derived identifier (12 hex chars)
    content: str     # observed outcome text (caller may truncate)
    source: str      # the action_id this observation was matched against
    timestamp: str   # ISO-8601 creation time
    
    # Surprise dimensions
    prediction_error: float = 0.0   # How wrong were we?
    novelty: float = 0.0            # How new is this?
    impact: float = 0.0             # How consequential?
    rarity: float = 0.0             # How rare historically?
    
    # Computed
    total_surprise: float = 0.0             # weighted sum of the dimensions above
    level: SurpriseLevel = SurpriseLevel.MUNDANE  # band derived from total_surprise
    
    # Metadata
    related_predictions: List[str] = field(default_factory=list)  # action descriptions that predicted this
    should_generate_axiom: bool = False  # True when surprise/impact warrant compression into an Axiom


@dataclass
class Axiom:
    """A compressed learning - fundamental truth extracted from experience."""
    axiom_id: str
    statement: str
    confidence: float
    domain: str
    source_memories: List[str]
    created_at: str
    access_count: int = 0
    validated: bool = False

    def to_prompt_injection(self) -> str:
        """Render this axiom as a single tagged line for system-prompt use.

        Confidence is bucketed into HIGH (> 0.8), MEDIUM (> 0.5) or LOW.
        """
        if self.confidence > 0.8:
            band = "HIGH"
        elif self.confidence > 0.5:
            band = "MEDIUM"
        else:
            band = "LOW"
        return f"[{self.domain.upper()}|{band}] {self.statement}"


class EnhancedSurpriseSystem:
    """
    Enhanced surprise calculation based on prediction error.
    
    Unlike the stub in surprise_memory.py, this actually tracks predictions
    and calculates genuine surprise when outcomes differ.
    """
    
    def __init__(self, history_path: str = None):
        self.history_path = Path(history_path or "E:/genesis-system/data/surprise_history.json")
        self.predictions: Dict[str, Dict] = {}  # action_id -> prediction
        self.history: List[Dict] = []
        self.domain_baselines: Dict[str, float] = {}  # domain -> avg surprise
        self._load_history()
    
    def _load_history(self):
        """Load historical surprise data for baseline calculation."""
        if self.history_path.exists():
            try:
                with open(self.history_path) as f:
                    data = json.load(f)
                    self.history = data.get("history", [])[-1000:]  # Keep last 1000
                    self.domain_baselines = data.get("baselines", {})
            except:
                pass
    
    def _save_history(self):
        """Persist surprise history atomically."""
        self.history_path.parent.mkdir(parents=True, exist_ok=True)
        data = {
            "history": self.history[-1000:],
            "baselines": self.domain_baselines,
            "updated": datetime.now().isoformat()
        }
        if ATOMIC_IO_AVAILABLE and atomic_json_write:
            atomic_json_write(self.history_path, data)
        else:
            with open(self.history_path, 'w') as f:
                json.dump(data, f, indent=2)
    
    def predict(self, action_id: str, action: str, expected_outcome: str, 
                confidence: float = 0.5, domain: str = "general") -> str:
        """
        Register a prediction for later surprise calculation.
        
        Returns prediction_id for tracking.
        """
        prediction = {
            "action_id": action_id,
            "action": action,
            "expected": expected_outcome,
            "confidence": confidence,
            "domain": domain,
            "timestamp": datetime.now().isoformat()
        }
        self.predictions[action_id] = prediction
        return action_id
    
    def observe(self, action_id: str, actual_outcome: str, 
                impact_score: float = 0.5) -> SurpriseEvent:
        """
        Observe actual outcome and calculate surprise.
        
        This is the core of the Titans framework - real-time learning signals.
        """
        event_id = hashlib.sha256(f"{action_id}{datetime.now().isoformat()}".encode()).hexdigest()[:12]
        timestamp = datetime.now().isoformat()
        
        # Get prediction if exists
        prediction = self.predictions.pop(action_id, None)
        
        if prediction:
            # Calculate prediction error
            expected = prediction["expected"].lower()
            actual = actual_outcome.lower()
            
            # Simple semantic distance (in production, use embeddings)
            # For now: word overlap as proxy
            expected_words = set(expected.split())
            actual_words = set(actual.split())
            
            if expected_words and actual_words:
                overlap = len(expected_words & actual_words)
                total = len(expected_words | actual_words)
                similarity = overlap / total if total > 0 else 0
                prediction_error = 1.0 - similarity
            else:
                prediction_error = 0.5  # Unknown
            
            # Weight by prediction confidence
            prediction_error *= prediction["confidence"]
            domain = prediction["domain"]
            related = [prediction["action"]]
        else:
            # No prediction - treat as neutral observation
            prediction_error = 0.3
            domain = "general"
            related = []
        
        # Calculate novelty (how different from recent history)
        novelty = self._calculate_novelty(actual_outcome, domain)
        
        # Calculate rarity (how rare in domain history)
        rarity = self._calculate_rarity(actual_outcome, domain)
        
        # Total surprise = weighted combination
        total_surprise = (
            prediction_error * 0.4 +  # Prediction error is most important
            novelty * 0.25 +
            impact_score * 0.2 +
            rarity * 0.15
        )
        
        # Determine level
        if total_surprise < 0.3:
            level = SurpriseLevel.MUNDANE
        elif total_surprise < 0.6:
            level = SurpriseLevel.NOTABLE
        elif total_surprise < 0.8:
            level = SurpriseLevel.SURPRISING
        else:
            level = SurpriseLevel.SHOCKING
        
        # Should generate axiom?
        should_axiom = total_surprise > 0.6 or (total_surprise > 0.4 and impact_score > 0.7)
        
        event = SurpriseEvent(
            event_id=event_id,
            content=actual_outcome,
            source=action_id,
            timestamp=timestamp,
            prediction_error=round(prediction_error, 3),
            novelty=round(novelty, 3),
            impact=round(impact_score, 3),
            rarity=round(rarity, 3),
            total_surprise=round(total_surprise, 3),
            level=level,
            related_predictions=related,
            should_generate_axiom=should_axiom
        )
        
        # Update history
        self.history.append({
            "event_id": event_id,
            "domain": domain,
            "surprise": total_surprise,
            "timestamp": timestamp
        })
        
        # Update domain baseline
        domain_history = [h for h in self.history if h["domain"] == domain]
        if domain_history:
            self.domain_baselines[domain] = sum(h["surprise"] for h in domain_history[-100:]) / min(len(domain_history), 100)
        
        self._save_history()
        return event
    
    def _calculate_novelty(self, content: str, domain: str) -> float:
        """Calculate how novel this content is vs recent history."""
        # Get recent content in domain
        recent = [h for h in self.history[-50:] if h.get("domain") == domain]
        if not recent:
            return 0.7  # New domain = fairly novel
        
        # Simple: check if similar content appeared recently
        content_words = set(content.lower().split())
        overlap_scores = []
        
        for h in recent:
            # We don't store full content in history, so use event_id prefix as proxy
            # In production, this would use embeddings
            overlap_scores.append(0.3)  # Placeholder
        
        return 1.0 - (sum(overlap_scores) / len(overlap_scores)) if overlap_scores else 0.5
    
    def _calculate_rarity(self, content: str, domain: str) -> float:
        """Calculate how rare this type of event is historically."""
        domain_count = len([h for h in self.history if h.get("domain") == domain])
        total_count = len(self.history)
        
        if total_count == 0:
            return 0.5
        
        domain_ratio = domain_count / total_count
        # Less common domains = higher rarity
        return 1.0 - min(domain_ratio * 2, 1.0)


class AxiomGenerator:
    """
    Generates compressed learnings (Axioms) from high-surprise memories.
    
    This is the "Memory Consolidator" from the Titans framework.
    Axioms are injected into system prompts for persistent learning.

    Compression uses an LLM waterfall (Claude -> Gemini -> rule-based);
    new axioms are de-duplicated by word overlap and persisted to JSON.
    """
    
    def __init__(self, axiom_path: str = None):
        # axiom_path: optional override for the JSON axiom store
        # (defaults to the Genesis data directory).
        self.axiom_path = Path(axiom_path or "E:/genesis-system/data/axioms.json")
        self.axioms: Dict[str, Axiom] = {}  # axiom_id -> Axiom
        self._load_axioms()
        
        # Primary LLM client: Anthropic Claude. Missing package or key
        # leaves self.client = None and we fall through the waterfall.
        try:
            from anthropic import Anthropic
            api_key = get_anthropic_api_key()
            if api_key:
                self.client = Anthropic(api_key=api_key)
            else:
                self.client = None
                print("[WARNING] ANTHROPIC_API_KEY not found. Using fallback compression.")
        except ImportError:
            self.client = None
            print("[WARNING] anthropic package not installed. Using fallback compression.")

        # Initialize Gemini for fallback
        # NOTE(review): a non-ImportError here (e.g. from configure()) would
        # leave self.gemini_model unset - the hasattr() guards below cover that.
        try:
            import google.generativeai as genai
            gemini_key = get_gemini_api_key()
            if gemini_key:
                genai.configure(api_key=gemini_key)
                # Corrected for 2026 stable API (Gemini 3)
                self.gemini_model = genai.GenerativeModel("gemini-3-flash")
            else:
                self.gemini_model = None
        except ImportError:
            self.gemini_model = None
    
    def _load_axioms(self):
        """Load existing axioms from the JSON store (best-effort)."""
        if self.axiom_path.exists():
            try:
                with open(self.axiom_path) as f:
                    data = json.load(f)
                    for aid, adict in data.get("axioms", {}).items():
                        # Stored dicts mirror asdict(Axiom), so they round-trip
                        # straight through the dataclass constructor.
                        self.axioms[aid] = Axiom(**adict)
            except Exception as e:
                print(f"Failed to load axioms: {e}")
    
    def _save_axioms(self):
        """Persist axioms atomically (plain JSON write as fallback)."""
        self.axiom_path.parent.mkdir(parents=True, exist_ok=True)
        data = {
            "axioms": {aid: asdict(a) for aid, a in self.axioms.items()},
            "count": len(self.axioms),
            "updated": datetime.now().isoformat()
        }
        if ATOMIC_IO_AVAILABLE and atomic_json_write:
            atomic_json_write(self.axiom_path, data)
        else:
            with open(self.axiom_path, 'w') as f:
                json.dump(data, f, indent=2)
    
    # NOTE(review): presumably reflexion_loop retries the call on failure
    # up to max_retries times - confirm against core.reflexion.
    @reflexion_loop(max_retries=2)
    def generate_axiom(self, surprise_event: SurpriseEvent, 
                       memory_content: str,
                       domain: str = "general") -> Optional[Axiom]:
        """
        Generate an axiom from a high-surprise event.
        
        Only generates if surprise warrants it.

        Args:
            surprise_event: The scored event that triggered generation.
            memory_content: Raw memory text to compress.
            domain: Topic bucket the axiom belongs to.

        Returns:
            The newly stored Axiom, or None when the event does not warrant
            one, compression yields nothing, or a similar axiom exists.
        """
        if not surprise_event.should_generate_axiom:
            return None
        
        # Create axiom statement (in production, use LLM to compress)
        # For now: extract key insight
        statement = self._compress_to_axiom(memory_content, surprise_event)
        
        if not statement:
            return None
        
        # Hash over statement + timestamp keeps IDs unique even for retries.
        axiom_id = hashlib.sha256(f"{statement}{datetime.now().isoformat()}".encode()).hexdigest()[:10]
        
        axiom = Axiom(
            axiom_id=axiom_id,
            statement=statement,
            # Confidence inherits the surprise magnitude that produced it.
            confidence=surprise_event.total_surprise,
            domain=domain,
            source_memories=[surprise_event.event_id],
            created_at=datetime.now().isoformat()
        )
        
        # Check for duplicate/similar axioms
        if not self._is_duplicate(axiom):
            self.axioms[axiom_id] = axiom
            self._save_axioms()
            return axiom
        
        return None
    
    def _compress_to_axiom(self, content: str, event: SurpriseEvent) -> str:
        """
        Compress memory content into axiom statement using LLM waterfall.
        1. Claude (Primary)
        2. Gemini (Secondary)
        3. Rule-based (Fallback)

        Always returns a non-empty string (the rule-based fallback cannot
        fail), so callers only need to handle the happy path.
        """
        prompt = f"""As the Genesis Axiom Generator, compress the following high-surprise event into a single, actionable fundamental truth (Axiom).

CONTENT:
{content}

SURPRISE METRICS:
- Total Surprise: {event.total_surprise}
- Prediction Error: {event.prediction_error}
- Level: {event.level.value}

RULES:
1. Output ONLY the axiom statement.
2. Be concise but precise.
3. Frame it as a generalized lesson for future autonomous operations.
4. Maximum 25 words.

AXIOM:"""

        # 1. Try Claude (Anthropic). Unknown-model errors (404/not_found)
        # fall through to the next model name; any other error abandons
        # Claude entirely and drops to Gemini.
        if hasattr(self, 'client') and self.client:
            models_to_try = [
                "claude-3-5-sonnet-latest", 
                "claude-3-5-sonnet-20241022",
                "claude-3-5-sonnet-20240620",
                "claude-3-opus-latest",
                "claude-3-sonnet-latest"
            ]
            for model_name in models_to_try:
                try:
                    response = self.client.messages.create(
                        model=model_name,
                        max_tokens=100,
                        messages=[{"role": "user", "content": prompt}]
                    )
                    # Strip quotes the model may wrap the axiom in.
                    return response.content[0].text.strip().replace('"', '')
                except Exception as e:
                    if "404" in str(e) or "not_found" in str(e):
                        continue
                    print(f"[ERROR] Claude ({model_name}) Axiom compression failed: {e}")
                    break

        # 2. Try Gemini (Google) - same skip-on-404, bail-on-other pattern.
        if hasattr(self, 'gemini_model') and self.gemini_model:
            import google.generativeai as genai
            models_to_try = [
                "gemini-3-flash",
                "gemini-2.5-flash",
                "gemini-1.5-flash",
                "gemini-1.5-flash-latest"
            ]
            for model_name in models_to_try:
                try:
                    model = genai.GenerativeModel(model_name)
                    response = model.generate_content(prompt)
                    return response.text.strip().replace('"', '')
                except Exception as e:
                    if "404" in str(e) or "not found" in str(e).lower():
                        continue
                    print(f"[ERROR] Gemini ({model_name}) Axiom compression failed: {e}")
                    break

        # 3. Rule-based Fallback: keyword-driven templates over the raw content.
        if "error" in content.lower() or "failed" in content.lower():
            return f"When encountering errors: {content[:100]}..."
        elif "success" in content.lower() or "worked" in content.lower():
            return f"Successful pattern discovered: {content[:100]}..."
        elif event.prediction_error > 0.5:
            return f"Prediction violated: {content[:100]}..."
        else:
            return f"Notable observation: {content[:80]}..."
    
    def _is_duplicate(self, new_axiom: Axiom) -> bool:
        """Check if similar axiom already exists.

        Similarity = fraction of the new statement's words that also appear
        in an existing statement; > 0.7 counts as duplicate.
        """
        new_words = set(new_axiom.statement.lower().split())
        
        for existing in self.axioms.values():
            existing_words = set(existing.statement.lower().split())
            overlap = len(new_words & existing_words)
            if overlap / max(len(new_words), 1) > 0.7:
                return True
        return False
    
    def get_active_axioms(self, domain: str = None, limit: int = 10) -> List[Axiom]:
        """Get axioms for system prompt injection.

        When a domain is given, axioms from that domain plus "general"
        qualify; results are ordered by confidence, then recency.
        """
        axioms = list(self.axioms.values())
        
        if domain:
            axioms = [a for a in axioms if a.domain == domain or a.domain == "general"]
        
        # Sort by confidence and recency
        axioms.sort(key=lambda a: (a.confidence, a.created_at), reverse=True)
        
        return axioms[:limit]
    
    def get_prompt_injection(self, domain: str = None, limit: int = 5) -> str:
        """
        Get formatted axioms for system prompt injection.
        
        This is the "Universal Primer" enhancement.

        Returns an empty string when no axioms exist, otherwise a markdown
        section with one tagged line per axiom.
        """
        axioms = self.get_active_axioms(domain, limit)
        
        if not axioms:
            return ""
        
        lines = ["## Active Axioms (Learned Truths)"]
        for axiom in axioms:
            lines.append(axiom.to_prompt_injection())
        
        return "\n".join(lines)


class GenesisHeartbeat:
    """
    The Genesis Heartbeat - connects all cognitive systems.
    
    Every heartbeat:
    1. Blackboard → Collect recent events
    2. Surprise System → Score events
    3. High-surprise → Generate Axioms
    4. Memory Cortex → Store appropriately
    5. Axiom Generator → Update system prompts
    6. Loop
    """
    
    def __init__(self, heartbeat_interval: int = 600, scheduler: Optional[CircadianScheduler] = None):  # 10 minutes default
        """
        Args:
            heartbeat_interval: Seconds between pulses when run as a daemon.
            scheduler: Optional circadian scheduler that may defer heavy
                tasks (axiom generation, consolidation) to other windows.
        """
        self.interval = heartbeat_interval
        self.running = False
        self.last_beat = None   # ISO timestamp of the previous pulse (or None)
        self.beat_count = 0
        self.scheduler = scheduler
        
        # State file for persistence across restarts
        self.state_path = Path("E:/genesis-system/data/heartbeat_state.json")
        
        # Initialize components
        print("[HEARTBEAT] Initializing cognitive components...")
        
        self.blackboard = Blackboard(
            persist_path="E:/genesis-system/data/blackboard_state.json",
            use_redis=True
        )
        
        self.cortex = MemoryCortex(enable_vectors=True)
        self.surprise = EnhancedSurpriseSystem()
        self.axiom_gen = AxiomGenerator()
        self.meta_agent = MetaAgent()
        
        # Load state
        self._load_state()
        
        print("[HEARTBEAT] All systems initialized.")
    
    def _load_state(self):
        """Load heartbeat state (beat count / last beat) from disk.

        Missing or corrupt state is non-fatal; counters keep their
        in-memory defaults.
        """
        if self.state_path.exists():
            try:
                with open(self.state_path) as f:
                    state = json.load(f)
                    self.beat_count = state.get("beat_count", 0)
                    self.last_beat = state.get("last_beat")
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt during startup are no longer swallowed.
                pass
    
    def _save_state(self):
        """Save heartbeat state atomically (plain write as fallback)."""
        self.state_path.parent.mkdir(parents=True, exist_ok=True)
        data = {
            "beat_count": self.beat_count,
            "last_beat": self.last_beat,
            "running": self.running
        }
        if ATOMIC_IO_AVAILABLE and atomic_json_write:
            atomic_json_write(self.state_path, data)
        else:
            with open(self.state_path, 'w') as f:
                json.dump(data, f, indent=2)
    
    def pulse(self) -> Dict[str, Any]:
        """
        Execute a single heartbeat cycle.
        
        This is the core loop that makes Genesis "alive".

        Returns:
            Summary dict with counts for events processed, surprises,
            axioms, memories stored, consolidation results, and duration.
        """
        pulse_start = datetime.now()
        results = {
            "beat_number": self.beat_count + 1,
            "timestamp": pulse_start.isoformat(),
            "events_processed": 0,
            "surprises_detected": 0,
            "axioms_generated": 0,
            "memories_stored": 0,
            "consolidations": {}
        }
        
        print(f"\n💓 HEARTBEAT #{results['beat_number']} - {pulse_start.isoformat()}")
        
        # ═══════════════════════════════════════════════════════════════
        # PHASE 1: COLLECT - Gather recent blackboard events
        # ═══════════════════════════════════════════════════════════════
        print("  [1/5] Collecting blackboard events...")
        
        # Get events since last heartbeat
        # NOTE(review): `cutoff` is computed but never passed to the queries
        # below, so they are limited only by count, not recency — confirm
        # whether Blackboard.query accepts a time filter and wire it through.
        cutoff = datetime.fromisoformat(self.last_beat) if self.last_beat else (pulse_start - timedelta(hours=1))
        
        recent_findings = self.blackboard.query(
            entry_type=EntryType.FINDING,
            limit=50
        )
        recent_decisions = self.blackboard.query(
            entry_type=EntryType.DECISION,
            limit=20
        )
        completed_tasks = self.blackboard.query(
            entry_type=EntryType.TASK,
            status=TaskStatus.COMPLETED,
            limit=30
        )
        
        all_events = recent_findings + recent_decisions + completed_tasks
        results["events_processed"] = len(all_events)
        print(f"      Found {len(all_events)} events to process")
        
        # ═══════════════════════════════════════════════════════════════
        # PHASE 2: SURPRISE - Score events for learning signals
        # ═══════════════════════════════════════════════════════════════
        print("  [2/5] Calculating surprise scores...")
        
        surprise_events = []
        for event in all_events:
            # Calculate impact based on entry type: decisions weigh most,
            # completed tasks next, everything else inherits its confidence.
            if event.entry_type == EntryType.DECISION:
                impact = 0.8
            elif event.entry_type == EntryType.TASK and event.status == TaskStatus.COMPLETED:
                impact = 0.6
            else:
                impact = event.confidence
            
            # Create observation (dict contents serialized, then truncated)
            content = json.dumps(event.content) if isinstance(event.content, dict) else str(event.content)
            surprise_event = self.surprise.observe(
                action_id=event.id,
                actual_outcome=content[:500],  # Truncate
                impact_score=impact
            )
            
            # Only SURPRISING/SHOCKING events flow into phases 3 and 4.
            if surprise_event.level in [SurpriseLevel.SURPRISING, SurpriseLevel.SHOCKING]:
                surprise_events.append((event, surprise_event))
                results["surprises_detected"] += 1
        
        print(f"      Detected {results['surprises_detected']} surprising events")
        
        # ═══════════════════════════════════════════════════════════════
        # PHASE 3: AXIOM GENERATION - Compress high-surprise into learnings
        # ═══════════════════════════════════════════════════════════════
        print("  [3/5] Generating axioms from surprises...")
        
        # PATENT COMPLIANCE: Check if axiom generation should run now
        should_gen = True
        if self.scheduler:
            should_gen, reason = self.scheduler.should_run_task(TaskType.AXIOM_GENERATION)
            if not should_gen:
                print(f"      ⏳ Deferring axiom generation: {reason}")
                # Schedule it for later if not already scheduled
                self.scheduler.schedule_heavy_task(TaskType.AXIOM_GENERATION)
        
        if should_gen:
            for bb_event, surprise_event in surprise_events:
                content = json.dumps(bb_event.content) if isinstance(bb_event.content, dict) else str(bb_event.content)
                # First tag doubles as the axiom's domain.
                domain = bb_event.tags[0] if bb_event.tags else "general"
                
                axiom = self.axiom_gen.generate_axiom(
                    surprise_event=surprise_event,
                    memory_content=content,
                    domain=domain
                )
                
                if axiom:
                    results["axioms_generated"] += 1
                    print(f"      📜 New axiom: {axiom.statement[:60]}...")
            
            if self.scheduler:
                self.scheduler.record_task_completion(TaskType.AXIOM_GENERATION)
        
        # ═══════════════════════════════════════════════════════════════
        # PHASE 4: MEMORY STORAGE - Route to appropriate tier
        # ═══════════════════════════════════════════════════════════════
        print("  [4/5] Storing memories in cortex...")
        
        for bb_event, surprise_event in surprise_events:
            content = json.dumps(bb_event.content) if isinstance(bb_event.content, dict) else str(bb_event.content)
            domain = bb_event.tags[0] if bb_event.tags else "general"
            
            # Store in cortex with surprise-informed tier: very surprising
            # memories go straight to SEMANTIC, moderately surprising to
            # EPISODIC, otherwise let the cortex pick.
            if surprise_event.total_surprise > 0.7:
                force_tier = MemoryTier.SEMANTIC
            elif surprise_event.total_surprise > 0.5:
                force_tier = MemoryTier.EPISODIC
            else:
                force_tier = None
            
            try:
                self.cortex.remember(
                    content=content[:1000],
                    source=f"heartbeat_{results['beat_number']}",
                    domain=domain,
                    metadata={
                        "surprise_score": surprise_event.total_surprise,
                        "surprise_level": surprise_event.level.value,
                        "blackboard_id": bb_event.id
                    },
                    force_tier=force_tier
                )
                results["memories_stored"] += 1
            except Exception as e:
                # Storage failure is non-fatal; the pulse continues.
                print(f"      ⚠️ Memory storage failed: {e}")
        
        print(f"      Stored {results['memories_stored']} memories")
        
        # ═══════════════════════════════════════════════════════════════
        # PHASE 5: CONSOLIDATION - Run memory promotion cycle
        # ═══════════════════════════════════════════════════════════════
        print("  [5/5] Running memory consolidation...")
        
        should_consolidate = True
        if self.scheduler:
            should_consolidate, reason = self.scheduler.should_run_task(TaskType.CONSOLIDATION)
            if not should_consolidate:
                print(f"      ⏳ Deferring consolidation: {reason}")
                self.scheduler.schedule_heavy_task(TaskType.CONSOLIDATION)
        
        if should_consolidate:
            try:
                # Direct call to cortex.consolidate() which now has assertion guards
                consolidation = self.cortex.consolidate()
                results["consolidations"] = consolidation
                print(f"      Promoted: {consolidation}")
                if self.scheduler:
                    self.scheduler.record_task_completion(TaskType.CONSOLIDATION)
            except Exception as e:
                print(f"      ⚠️ Consolidation error: {e}")
        
        # ═══════════════════════════════════════════════════════════════
        # COMPLETE - Update state
        # ═══════════════════════════════════════════════════════════════
        self.beat_count += 1
        self.last_beat = pulse_start.isoformat()
        self._save_state()
        
        duration = (datetime.now() - pulse_start).total_seconds()
        results["duration_seconds"] = round(duration, 2)
        
        print(f"💓 HEARTBEAT COMPLETE - {duration:.2f}s")
        
        return results
    
    def start(self):
        """Start the heartbeat daemon on a background (daemon) thread."""
        if self.running:
            print("Heartbeat already running")
            return
        
        self.running = True
        self._save_state()
        
        print(f"🫀 Starting Genesis Heartbeat (interval: {self.interval}s)")
        
        def beat_loop():
            # Pulse forever until self.running is cleared by stop().
            while self.running:
                try:
                    self.pulse()
                except Exception as e:
                    print(f"❌ Heartbeat error: {e}")
                
                # Sleep in small increments to allow clean shutdown
                for _ in range(self.interval):
                    if not self.running:
                        break
                    time.sleep(1)
        
        self.thread = threading.Thread(target=beat_loop, daemon=True)
        self.thread.start()
    
    def stop(self):
        """Stop the heartbeat daemon (the loop exits within ~1 second)."""
        print("🛑 Stopping Genesis Heartbeat...")
        self.running = False
        self._save_state()
    
    def status(self) -> Dict[str, Any]:
        """Get heartbeat status plus component statistics."""
        return {
            "running": self.running,
            "beat_count": self.beat_count,
            "last_beat": self.last_beat,
            "interval_seconds": self.interval,
            "axiom_count": len(self.axiom_gen.axioms),
            "cortex_stats": self.cortex.get_stats(),
            "blackboard_stats": self.blackboard.stats()
        }
    
    def get_axioms_for_prompt(self, domain: str = None) -> str:
        """Get axiom injection for system prompts."""
        return self.axiom_gen.get_prompt_injection(domain)


# ═══════════════════════════════════════════════════════════════════════════════
# CLI Interface
# ═══════════════════════════════════════════════════════════════════════════════
if __name__ == "__main__":
    import sys

    # No subcommand: print usage and exit cleanly.
    args = sys.argv[1:]
    if not args:
        print("""
Genesis Heartbeat - The Circulatory System
==========================================

Commands:
  start              Start heartbeat daemon (background)
  pulse              Execute single heartbeat cycle
  status             Show heartbeat status
  axioms             Show current axioms
  axioms <domain>    Show axioms for specific domain
  inject             Get axiom injection for prompts

Examples:
  python genesis_heartbeat.py pulse
  python genesis_heartbeat.py axioms technical
  python genesis_heartbeat.py inject
        """)
        sys.exit(0)

    cmd = args[0]
    hb = GenesisHeartbeat()

    if cmd == "start":
        # Run the daemon thread and idle in the foreground until Ctrl+C.
        hb.start()
        print("Heartbeat started. Press Ctrl+C to stop.")
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            hb.stop()

    elif cmd == "pulse":
        # Single cycle; summary printed as JSON after the pulse's own output.
        outcome = hb.pulse()
        print("\n" + "=" * 60)
        print(json.dumps(outcome, indent=2))

    elif cmd == "status":
        print(json.dumps(hb.status(), indent=2))

    elif cmd == "axioms":
        # Optional second argument filters by domain.
        domain = args[1] if len(args) > 1 else None
        found = hb.axiom_gen.get_active_axioms(domain)

        if not found:
            print("No axioms generated yet.")
        else:
            bar = "=" * 60
            print(f"\n{bar}")
            print(f"GENESIS AXIOMS ({len(found)} total)")
            print(f"{bar}\n")
            for ax in found:
                print(f"[{ax.domain}] ({ax.confidence:.2f})")
                print(f"  {ax.statement}")
                print(f"  Created: {ax.created_at}")
                print()

    elif cmd == "inject":
        domain = args[1] if len(args) > 1 else None
        text = hb.get_axioms_for_prompt(domain)
        print(text if text else "No axioms to inject.")

    else:
        print(f"Unknown command: {cmd}")
        sys.exit(1)
