#!/usr/bin/env python3
"""
Context Engineering Skill for Genesis System
=============================================

Production-ready context management implementing patterns from:
- Anthropic's context engineering research
- Progressive disclosure techniques
- Sub-agent delegation patterns
- Token-efficient context management

This skill provides intelligent context loading, compression, and injection
for optimal LLM performance within token budgets.

Usage:
    from context_engineering import ContextManager, ContextHooks

    # Initialize with memory integration
    ctx = ContextManager()

    # Load relevant context for a query
    context = ctx.load_relevant_context("implement user authentication")

    # Progressive disclosure
    disclosed = ctx.progressive_disclosure(context, depth=2)

    # Inject into prompt
    final_prompt = ctx.inject_context(user_prompt, disclosed)

Video Reference: ySA9tJ8RfVM - Context Engineering patterns
"""

import json
import hashlib
import re
import sys
sys.path.append('/mnt/e/genesis-system/data/genesis-memory')
from elestio_config import PostgresConfig
import psycopg2
import psycopg2.extras
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, asdict, field
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple, Callable, Union
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Genesis system path
GENESIS_PATH = Path("/mnt/e/genesis-system")


# =============================================================================
# Token Budget Management
# =============================================================================

class TokenBudgetManager:
    """
    Token-budget accounting for LLM context windows.

    Estimates token counts from character length, tracks allocations
    against a per-model budget, and trims oversized text down to a
    target token count. A fraction of the model limit (reserve_ratio)
    is held back for the model's response.
    """

    # Average characters per token for different content types
    CHARS_PER_TOKEN = {
        "code": 3.5,      # Code is more token-dense
        "prose": 4.0,     # Natural language
        "json": 3.0,      # JSON has more special characters
        "mixed": 3.8,     # Default mixed content
    }

    # Model context limits (in tokens)
    MODEL_LIMITS = {
        "claude-3-opus": 200000,
        "claude-3-sonnet": 200000,
        "claude-3-haiku": 200000,
        "claude-opus-4-5": 200000,
        "gpt-4-turbo": 128000,
        "gpt-4": 8192,
        "default": 100000,
    }

    def __init__(self, model: str = "default", reserve_ratio: float = 0.2):
        """
        Initialize the token budget manager.

        Args:
            model: Model name for context limit lookup (unknown names
                fall back to the "default" limit)
            reserve_ratio: Fraction of the window reserved for the
                model's response (default 20%)
        """
        self.model = model
        self.max_tokens = self.MODEL_LIMITS.get(model, self.MODEL_LIMITS["default"])
        self.reserve_ratio = reserve_ratio
        # Whatever isn't reserved for the response is spendable on context.
        self.available_budget = int(self.max_tokens * (1 - reserve_ratio))
        self.allocated = 0
        # Guards reads/writes of `allocated` across threads.
        self._lock = threading.Lock()

    def estimate_tokens(self, text: str, content_type: str = "mixed") -> int:
        """
        Estimate the token count of *text*.

        Character-length based, with per-content-type density ratios and
        small corrections for code newlines and JSON brackets.

        Args:
            text: Text to estimate
            content_type: One of "code", "prose", "json", "mixed"

        Returns:
            Estimated token count (truncated to int)
        """
        if not text:
            return 0

        density = self.CHARS_PER_TOKEN.get(content_type, self.CHARS_PER_TOKEN["mixed"])
        estimate = len(text) / density

        if content_type == "code":
            # Newlines/indentation make code slightly more expensive.
            estimate += text.count('\n') * 0.5
        elif content_type == "json":
            # Each structural bracket tends to cost extra tokens.
            estimate += sum(text.count(ch) for ch in "{}[]") * 0.3

        return int(estimate)

    def allocate(self, tokens: int) -> bool:
        """
        Try to reserve *tokens* from the remaining budget.

        Args:
            tokens: Number of tokens to allocate

        Returns:
            True on success; False (with no state change) if the
            allocation would exceed the available budget.
        """
        with self._lock:
            if self.allocated + tokens > self.available_budget:
                return False
            self.allocated += tokens
            return True

    def release(self, tokens: int):
        """Return previously allocated tokens to the pool (floored at 0)."""
        with self._lock:
            self.allocated = max(0, self.allocated - tokens)

    def get_remaining(self) -> int:
        """Tokens still available for allocation."""
        with self._lock:
            return self.available_budget - self.allocated

    def reset(self):
        """Clear all allocations."""
        with self._lock:
            self.allocated = 0

    def trim_to_budget(self, text: str, budget: int,
                       preserve_start: bool = True,
                       content_type: str = "mixed") -> str:
        """
        Cut *text* down so it fits within *budget* tokens.

        Args:
            text: Text to trim
            budget: Maximum tokens allowed
            preserve_start: Keep the beginning (True) or the end (False)
            content_type: Content type used for token estimation

        Returns:
            The original text if it already fits, otherwise a trimmed
            copy with a "[truncated]" marker on the cut side.
        """
        if self.estimate_tokens(text, content_type) <= budget:
            return text

        density = self.CHARS_PER_TOKEN.get(content_type, self.CHARS_PER_TOKEN["mixed"])
        char_limit = int(budget * density * 0.95)  # 5% safety margin

        if preserve_start:
            kept = text[:char_limit]
            # Prefer ending on a sentence or line boundary when one
            # falls within the last 20% of the kept text.
            boundary = max(kept.rfind('. '), kept.rfind('\n'))
            if boundary > char_limit * 0.8:
                kept = kept[:boundary + 1]
            return kept + "\n... [truncated]"

        kept = text[-char_limit:]
        # Prefer starting on a sentence or line boundary when one falls
        # within the first 20% of the kept text.
        period_at = kept.find('. ')
        newline_at = kept.find('\n')
        if 0 < period_at < char_limit * 0.2:
            kept = kept[period_at + 2:]
        elif 0 < newline_at < char_limit * 0.2:
            kept = kept[newline_at + 1:]
        return "[truncated] ...\n" + kept

    def get_stats(self) -> Dict[str, Any]:
        """Snapshot of budget usage, taken under the lock."""
        with self._lock:
            spent = self.allocated
            cap = self.available_budget
            return {
                "model": self.model,
                "max_tokens": self.max_tokens,
                "available_budget": cap,
                "allocated": spent,
                "remaining": cap - spent,
                "utilization": round(spent / cap, 3) if cap > 0 else 0,
            }


# =============================================================================
# Context Priority and Relevance
# =============================================================================

class ContextPriority(Enum):
    """Priority levels for context items.

    Higher values are admitted first when assembling or disclosing
    context; lower values are the first dropped under budget pressure.
    """
    CRITICAL = 5    # Must include - system instructions, constraints
    HIGH = 4        # Very important - recent relevant memories
    MEDIUM = 3      # Useful - related context
    LOW = 2         # Nice to have - background info
    MINIMAL = 1     # Only if space allows


@dataclass
class ContextItem:
    """A single piece of context plus its bookkeeping metadata."""
    content: str                         # The context text itself
    source: str                          # Where this came from
    priority: ContextPriority = ContextPriority.MEDIUM
    relevance_score: float = 0.5         # Relevance to the current query, 0-1
    timestamp: str = ""                  # ISO timestamp of creation/retrieval
    token_count: int = 0                 # Estimated token cost of the content
    metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Backfill defaults that depend on the other field values.
        self.timestamp = self.timestamp or datetime.now().isoformat()
        if not self.token_count:
            # Rough ~4 chars/token estimate; callers may supply a better one.
            self.token_count = len(self.content) // 4


@dataclass
class ContextBundle:
    """An ordered collection of ContextItems gathered for one task."""
    items: List[ContextItem] = field(default_factory=list)
    query: str = ""                      # Query this bundle was assembled for
    total_tokens: int = 0                # Cached sum of item token estimates
    created: str = ""                    # ISO timestamp of bundle creation

    def __post_init__(self):
        self.created = self.created or datetime.now().isoformat()
        self._update_tokens()

    def _update_tokens(self):
        """Recompute the cached token total from the current items."""
        self.total_tokens = sum(entry.token_count for entry in self.items)

    def add(self, item: ContextItem):
        """Append *item*, keeping the token total in sync."""
        self.items.append(item)
        self._update_tokens()

    def get_by_priority(self, min_priority: ContextPriority = ContextPriority.LOW) -> List[ContextItem]:
        """Return items whose priority is at least *min_priority*."""
        threshold = min_priority.value
        return [entry for entry in self.items if entry.priority.value >= threshold]

    def to_text(self, include_sources: bool = False) -> str:
        """Render the bundle as text, highest priority/relevance first."""
        ordered = sorted(self.items, key=lambda e: (-e.priority.value, -e.relevance_score))
        if include_sources:
            chunks = [f"[Source: {entry.source}]\n{entry.content}" for entry in ordered]
        else:
            chunks = [entry.content for entry in ordered]
        return "\n\n".join(chunks)


class ContextRelevanceScorer:
    """
    Scores how relevant a piece of context is to a query.

    Combines term overlap (Jaccard) with query coverage, plus small
    boosts for matching domain keywords and for recent timestamps.
    """

    # Domain-specific keywords for boosting
    DOMAIN_KEYWORDS = {
        "memory": ["remember", "recall", "store", "retrieve", "context", "episodic", "semantic"],
        "code": ["function", "class", "variable", "import", "method", "api", "implement"],
        "automation": ["workflow", "trigger", "schedule", "n8n", "hook", "event"],
        "learning": ["learned", "discovered", "pattern", "insight", "lesson"],
    }

    def __init__(self):
        # Common English words ignored during tokenization.
        self._stopwords = {'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been',
                          'to', 'of', 'and', 'in', 'that', 'it', 'for', 'on', 'with',
                          'this', 'by', 'from', 'or', 'as', 'at', 'but', 'if', 'so'}

    def score(self, query: str, context: str, context_metadata: Dict = None) -> float:
        """
        Score relevance of *context* to *query*.

        Args:
            query: User query or task description
            context: Context text to score
            context_metadata: Optional dict; a "domain" key enables the
                keyword boost, a "timestamp" key (ISO string) enables
                the recency boost

        Returns:
            Relevance score in [0, 1], rounded to 3 decimals
        """
        query_terms = self._tokenize(query)
        context_terms = self._tokenize(context)

        # No meaningful terms on either side -> no relevance signal.
        if not query_terms or not context_terms:
            return 0.0

        shared = len(query_terms & context_terms)
        jaccard = shared / len(query_terms | context_terms)
        coverage = shared / len(query_terms)  # Fraction of query terms present

        meta = context_metadata or {}

        # Boost when the query touches the context's declared domain.
        domain_lift = 0
        domain = meta.get("domain", "")
        if domain in self.DOMAIN_KEYWORDS and query_terms & set(self.DOMAIN_KEYWORDS[domain]):
            domain_lift = 0.15

        # Boost context from the last 24 hours, fading linearly with age.
        recency_lift = 0
        if "timestamp" in meta:
            try:
                age_hours = (datetime.now() - datetime.fromisoformat(meta["timestamp"])).total_seconds() / 3600
                if age_hours < 24:
                    recency_lift = 0.1 * (1 - age_hours / 24)
            except (ValueError, TypeError):
                # Unparseable timestamps simply get no recency boost.
                pass

        combined = (jaccard * 0.4) + (coverage * 0.6)
        return round(min(1.0, combined + domain_lift + recency_lift), 3)

    def _tokenize(self, text: str) -> set:
        """Lower-cased alphabetic terms, minus stopwords and 1-2 letter words."""
        return {word for word in re.findall(r'\b[a-z]+\b', text.lower())
                if len(word) > 2 and word not in self._stopwords}

    def prioritize_contexts(self, query: str, contexts: List[Tuple[str, Dict]]) -> List[Tuple[str, Dict, float]]:
        """
        Rank contexts by descending relevance to *query*.

        Args:
            query: User query
            contexts: List of (content, metadata) tuples

        Returns:
            List of (content, metadata, score) sorted best-first
        """
        ranked = [(content, meta, self.score(query, content, meta))
                  for content, meta in contexts]
        ranked.sort(key=lambda triple: triple[2], reverse=True)
        return ranked


# =============================================================================
# Progressive Disclosure
# =============================================================================

class ProgressiveDisclosure:
    """
    Reveals context in stages rather than all at once.

    Shallow depths expose only the highest-priority items; deeper
    depths admit progressively more, always within a depth-dependent
    token budget. This mirrors just-in-time retrieval instead of
    pre-loading everything.
    """

    DEPTH_LEVELS = {
        0: "overview",      # High-level summary only
        1: "essential",     # Core relevant information
        2: "detailed",      # Full context with examples
        3: "comprehensive", # Everything including edge cases
    }

    def __init__(self, budget_manager: TokenBudgetManager = None):
        self.budget = budget_manager or TokenBudgetManager()
        self.scorer = ContextRelevanceScorer()

    def disclose(self, context: ContextBundle, depth: int = 1,
                 budget: int = None) -> ContextBundle:
        """
        Build a partial bundle appropriate for *depth*.

        Args:
            context: Full context bundle
            depth: Disclosure depth, clamped to 0-3
            budget: Optional token budget override

        Returns:
            New ContextBundle containing the disclosed items
        """
        depth = max(0, min(3, depth))  # Clamp to valid range
        cap = budget or self._get_budget_for_depth(depth)

        # Lowest priority value admitted at each depth.
        floor = {
            0: ContextPriority.CRITICAL.value,
            1: ContextPriority.HIGH.value,
            2: ContextPriority.MEDIUM.value,
            3: ContextPriority.MINIMAL.value,
        }[depth]

        revealed = ContextBundle(query=context.query)
        spent = 0

        ranked = sorted(context.items,
                        key=lambda it: (it.priority.value, it.relevance_score),
                        reverse=True)

        for entry in ranked:
            if entry.priority.value < floor:
                continue

            if spent + entry.token_count <= cap:
                revealed.add(entry)
                spent += entry.token_count
            elif depth >= 2:
                # Out of room at a deep level: squeeze in a trimmed copy
                # of this item (if enough space remains) and stop.
                leftover = cap - spent
                if leftover > 100:  # Minimum useful content
                    revealed.add(ContextItem(
                        content=self.budget.trim_to_budget(
                            entry.content, leftover, preserve_start=True
                        ),
                        source=entry.source,
                        priority=entry.priority,
                        relevance_score=entry.relevance_score,
                        metadata={**entry.metadata, "trimmed": True},
                    ))
                    break

        return revealed

    def _get_budget_for_depth(self, depth: int) -> int:
        """Token budget as a depth-dependent fraction of the full budget."""
        share = {
            0: 0.1,   # 10% for overview
            1: 0.3,   # 30% for essential
            2: 0.6,   # 60% for detailed
            3: 0.9,   # 90% for comprehensive
        }.get(depth, 0.5)
        return int(self.budget.available_budget * share)

    def create_summary(self, context: ContextBundle, max_tokens: int = 500) -> str:
        """
        Build a brief per-source overview of *context* (depth-0 view).

        Args:
            context: Full context bundle
            max_tokens: Maximum tokens for the summary

        Returns:
            Summary text, trimmed to *max_tokens*
        """
        if not context.items:
            return "No relevant context available."

        # Bucket items by their source label.
        grouped = {}
        for entry in context.items:
            grouped.setdefault(entry.source, []).append(entry)

        lines = [f"Context Summary ({len(context.items)} items from {len(grouped)} sources):"]

        for source, entries in grouped.items():
            important = [e for e in entries if e.priority.value >= ContextPriority.HIGH.value]
            lines.append(f"\n- {source}: {len(entries)} items ({len(important)} high priority)")

            # Show the first high-priority item's opening as a teaser.
            if important:
                lead = important[0]
                brief = lead.content[:100] + "..." if len(lead.content) > 100 else lead.content
                lines.append(f"  Top: {brief}")

        return self.budget.trim_to_budget("\n".join(lines), max_tokens)


# =============================================================================
# Context Compression
# =============================================================================

class ContextCompressor:
    """
    Compresses context to fit within token budgets.

    Two strategies, usable alone or combined ("hybrid"):
    1. Compaction: drop blank/duplicate/boilerplate lines (recoverable)
    2. Summarization: keep only the highest-signal sentences (lossy)
    """

    def __init__(self, budget_manager: TokenBudgetManager = None):
        self.budget = budget_manager or TokenBudgetManager()

    def compress(self, context: str, target_tokens: int,
                 strategy: str = "hybrid") -> str:
        """
        Compress *context* down to roughly *target_tokens*.

        Args:
            context: Context to compress
            target_tokens: Target token count
            strategy: "compact", "summarize", or "hybrid"

        Returns:
            Compressed context (unchanged if it already fits)
        """
        if self.budget.estimate_tokens(context) <= target_tokens:
            return context

        if strategy == "compact":
            return self._compact(context, target_tokens)
        if strategy == "summarize":
            return self._summarize(context, target_tokens)

        # Hybrid: cheap reversible compaction first; fall back to the
        # lossy summary only if that wasn't enough.
        reduced = self._compact(context, target_tokens)
        if self.budget.estimate_tokens(reduced) > target_tokens:
            return self._summarize(reduced, target_tokens)
        return reduced

    def _compact(self, context: str, target_tokens: int) -> str:
        """
        Drop blank, duplicate, and boilerplate lines.

        Reversible in spirit: what is removed can be recovered from the
        environment (repeated text, debug noise, comment dividers).
        """
        survivors = []
        fingerprints = set()

        for raw_line in context.split('\n'):
            trimmed = raw_line.strip()

            if not trimmed:
                continue

            # Case-insensitive dedup via a short content hash.
            digest = hashlib.md5(trimmed.lower().encode()).hexdigest()[:8]
            if digest in fingerprints:
                continue
            fingerprints.add(digest)

            if self._is_verbose(trimmed):
                continue

            survivors.append(raw_line)

        result = '\n'.join(survivors)

        # Compaction alone may not reach the target; trim the remainder.
        if self.budget.estimate_tokens(result) > target_tokens:
            result = self.budget.trim_to_budget(result, target_tokens)

        return result

    def _is_verbose(self, line: str) -> bool:
        """True if *line* matches a removable boilerplate pattern."""
        removable = (
            r'^[\s]*#\s*-+\s*$',              # Comment dividers
            r'^[\s]*#+\s*$',                   # Empty comments
            r'^\s*print\s*\(',                 # Debug prints
            r'^\s*console\.log\s*\(',          # JS debug
            r'^\s*logger\.(debug|trace)',      # Debug logs
            r'^[\s]*$',                        # Empty lines
            r'^[\s]*pass\s*$',                 # Pass statements
        )
        return any(re.match(pattern, line) for pattern in removable)

    def _summarize(self, context: str, target_tokens: int) -> str:
        """
        Keep only the highest-signal sentences (non-reversible).
        """
        signal_words = {'important', 'critical', 'must', 'should', 'error', 'success',
                        'learned', 'discovered', 'note', 'warning', 'remember'}

        weighted = []
        for fragment in re.split(r'[.!?]+', context):
            sentence = fragment.strip()
            if len(sentence) < 20:
                continue

            weight = len(set(sentence.lower().split()) & signal_words)

            # Inline code fragments are usually worth keeping.
            if re.search(r'`[^`]+`', sentence):
                weight += 1

            weighted.append((weight, sentence))

        # Best sentences first (stable sort preserves original order on ties).
        weighted.sort(key=lambda pair: pair[0], reverse=True)

        chosen = ["[Summarized Context]"]
        spent = 10  # Header overhead

        for weight, sentence in weighted:
            cost = self.budget.estimate_tokens(sentence)
            if spent + cost + 2 > target_tokens:
                break
            chosen.append("- " + sentence)
            spent += cost + 2

        return '\n'.join(chosen)

    def compress_bundle(self, bundle: ContextBundle, target_tokens: int) -> ContextBundle:
        """
        Compress a whole bundle to roughly *target_tokens*.

        Args:
            bundle: ContextBundle to compress
            target_tokens: Target token count

        Returns:
            New compressed ContextBundle (the original if it already fits)
        """
        if bundle.total_tokens <= target_tokens:
            return bundle

        squeezed = ContextBundle(query=bundle.query)
        left = target_tokens

        ranked = sorted(bundle.items,
                        key=lambda it: (it.priority.value, it.relevance_score),
                        reverse=True)

        for entry in ranked:
            if left <= 0:
                break

            # Higher-priority items receive a proportionally larger share.
            share = min(entry.token_count, int(left * (entry.priority.value / 5)))
            share = max(share, 50)  # Minimum useful content

            if entry.token_count <= share:
                squeezed.add(entry)
                left -= entry.token_count
            else:
                # Compress this item individually into its share.
                shrunk = ContextItem(
                    content=self.compress(entry.content, share),
                    source=entry.source,
                    priority=entry.priority,
                    relevance_score=entry.relevance_score,
                    metadata={**entry.metadata, "compressed": True},
                )
                squeezed.add(shrunk)
                left -= shrunk.token_count

        return squeezed


# =============================================================================
# Memory Integration
# =============================================================================

class MemoryContextLoader:
    """
    Loads context from Genesis memory architecture.

    Integrates with:
    - Episodic Memory: Recent experiences (Elestio PostgreSQL)
    - Semantic Memory: Knowledge graph entities (semantic_memory_log.json)
    - Trigger Memory: Automation rules (n8n workflow templates)

    Every loader is best-effort: failures are logged as warnings and an
    empty (or partial) item list is returned instead of raising, so
    context loading can never break the calling task.
    """

    def __init__(self, genesis_path: Path = GENESIS_PATH):
        """
        Args:
            genesis_path: Root of the Genesis system tree that holds the
                memory log files.
        """
        self.genesis_path = genesis_path
        self.scorer = ContextRelevanceScorer()
        self._init_connections()

    def _init_connections(self):
        """Resolve connection parameters and memory-store file paths."""
        # Episodic memory (Elestio PostgreSQL). Only the parameters are
        # kept here; a connection is opened per query and always closed.
        self.pg_conn_params = PostgresConfig.get_connection_params()

        # Semantic memory log
        self.semantic_log = self.genesis_path / "semantic_memory_log.json"

        # Memory store (surprise-based)
        self.memory_store = self.genesis_path / "memory_store.json"

    def load_relevant_context(self, query: str,
                              sources: List[str] = None,
                              max_items: int = 10) -> ContextBundle:
        """
        Load relevant context from all memory sources.

        Args:
            query: Query to find relevant context for
            sources: List of sources to query (episodic, semantic, trigger)
            max_items: Sizing hint split across sources (episodic gets
                1/2, semantic 1/3, trigger 1/4 of it)

        Returns:
            ContextBundle with relevant items
        """
        sources = sources or ["episodic", "semantic", "trigger"]
        bundle = ContextBundle(query=query)

        if "episodic" in sources:
            for item in self._load_from_episodic(query, max_items // 2):
                bundle.add(item)

        if "semantic" in sources:
            for item in self._load_from_semantic(query, max_items // 3):
                bundle.add(item)

        if "trigger" in sources:
            for item in self._load_from_triggers(query, max_items // 4):
                bundle.add(item)

        return bundle

    def _load_from_episodic(self, query: str, limit: int) -> List[ContextItem]:
        """
        Load from episodic memory (Elestio PostgreSQL).

        Uses ILIKE keyword matching as a full-text-search fallback. The
        connection and cursor are closed in finally blocks so a failed
        query cannot leak them.
        """
        items = []

        try:
            conn = psycopg2.connect(**self.pg_conn_params)
            try:
                cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
                try:
                    # Build search terms from the query's longer words.
                    search_terms = [w.strip() for w in query.split() if len(w.strip()) > 2]

                    if search_terms:
                        # Match any term in content. The values are passed as
                        # query parameters; only the fixed "content ILIKE %s"
                        # template is repeated into the SQL, so this stays
                        # injection-safe.
                        conditions = " OR ".join(["content ILIKE %s"] * len(search_terms))
                        params = [f"%{term}%" for term in search_terms]
                        params.append(limit)

                        cur.execute(f"""
                            SELECT * FROM em_episodic_memories
                            WHERE {conditions}
                            ORDER BY score DESC NULLS LAST
                            LIMIT %s
                        """, params)
                    else:
                        cur.execute("""
                            SELECT * FROM em_episodic_memories
                            ORDER BY score DESC NULLS LAST
                            LIMIT %s
                        """, (limit,))

                    results = cur.fetchall()
                finally:
                    cur.close()
            finally:
                conn.close()

            for row in results:
                # NOTE(review): row['timestamp'] may be a datetime object
                # rather than an ISO string depending on the column type;
                # the scorer tolerates either (TypeError is caught there).
                relevance = self.scorer.score(
                    query, row['content'],
                    {"domain": row.get('domain', ''), "timestamp": row.get('timestamp', '')}
                )

                # Map the stored memory score to a priority band.
                score = row.get('score') or 0.5
                if score >= 0.8:
                    priority = ContextPriority.HIGH
                elif score >= 0.5:
                    priority = ContextPriority.MEDIUM
                else:
                    priority = ContextPriority.LOW

                items.append(ContextItem(
                    content=row['content'],
                    source=f"episodic:{row.get('domain', 'unknown')}",
                    priority=priority,
                    relevance_score=relevance,
                    timestamp=row.get('timestamp', '') or "",
                    metadata={
                        "memory_id": row.get('id'),
                        "domain": row.get('domain', ''),
                        "source": row.get('source', ''),
                        "score": score
                    }
                ))
        except Exception as e:
            # Best-effort: degrade to no episodic context on any failure.
            logger.warning(f"Error loading episodic memory: {e}")

        return items

    def _load_from_semantic(self, query: str, limit: int) -> List[ContextItem]:
        """
        Load from the semantic memory log (knowledge-graph entities).

        Oversamples 2x the limit, scores each entity against the query,
        then keeps the top *limit* items above the relevance threshold.
        """
        items = []

        if not self.semantic_log.exists():
            return items

        try:
            with open(self.semantic_log) as f:
                data = json.load(f)

            entities = data.get("entities", [])

            for entity in entities[:limit * 2]:  # Oversample then filter
                name = entity.get("name", "")
                observations = entity.get("observations", [])
                content = f"{name}\n" + "\n".join(f"- {obs}" for obs in observations)

                relevance = self.scorer.score(query, content, {
                    "domain": entity.get("entityType", "observation")
                })

                if relevance > 0.1:  # Relevance threshold
                    items.append(ContextItem(
                        content=content,
                        source=f"semantic:{entity.get('entityType', 'unknown')}",
                        priority=ContextPriority.MEDIUM,
                        relevance_score=relevance,
                        metadata={
                            "entity_name": name,
                            "entity_type": entity.get("entityType")
                        }
                    ))

            # Sort by relevance and keep only the best `limit` items.
            items.sort(key=lambda x: x.relevance_score, reverse=True)
            items = items[:limit]

        except Exception as e:
            logger.warning(f"Error loading semantic memory: {e}")

        return items

    def _load_from_triggers(self, query: str, limit: int) -> List[ContextItem]:
        """
        Load from trigger memory (n8n workflow templates).

        Scans workflow JSON files under genesis-n8n-templates, building a
        short name/node summary per workflow and scoring it against the
        query. Stops scanning as soon as *limit* items are collected.
        """
        items = []

        # Check for n8n workflow definitions
        n8n_path = self.genesis_path / "genesis-n8n-templates"

        if not n8n_path.exists():
            return items

        try:
            for workflow_file in n8n_path.glob("**/*.json"):
                if len(items) >= limit:
                    break  # No point parsing further files once full

                try:
                    with open(workflow_file) as f:
                        workflow = json.load(f)

                    name = workflow.get("name", workflow_file.stem)
                    nodes = workflow.get("nodes", [])

                    # Create summary of workflow
                    node_types = [n.get("type", "unknown") for n in nodes]
                    content = f"Workflow: {name}\nNodes: {', '.join(node_types[:5])}"

                    # Separator before `name` so the last node type and the
                    # workflow name don't fuse into one bogus token.
                    relevance = self.scorer.score(query, f"{content} {name}")

                    if relevance > 0.1 and len(items) < limit:
                        items.append(ContextItem(
                            content=content,
                            source="trigger:n8n",
                            priority=ContextPriority.LOW,
                            relevance_score=relevance,
                            metadata={
                                "workflow_name": name,
                                "workflow_file": str(workflow_file)
                            }
                        ))
                except json.JSONDecodeError:
                    continue  # Skip malformed workflow files

        except Exception as e:
            logger.warning(f"Error loading trigger memory: {e}")

        return items


# =============================================================================
# Context Hooks
# =============================================================================

class ContextHookType(Enum):
    """Types of context hooks.

    Each member names a lifecycle point at which registered hooks fire
    (dispatched by ContextHooks.trigger). Default hooks are registered
    only for the first three; MEMORY_UPDATE and ERROR_DETECTED appear
    reserved for external callers -- TODO confirm against other modules.
    """
    TASK_START = "task_start"            # A new task is starting
    TASK_COMPLETE = "task_complete"      # A task has finished
    CONTEXT_REFRESH = "context_refresh"  # Stale context should be updated
    MEMORY_UPDATE = "memory_update"      # Memory content changed
    ERROR_DETECTED = "error_detected"    # An error was observed


@dataclass
class ContextHook:
    """Represents a context injection hook.

    Bundles a callback with scheduling metadata; hooks of the same type
    run in descending ``priority`` order and are skipped when disabled.
    """
    hook_type: ContextHookType      # Lifecycle point this hook fires at
    callback: Callable              # Invoked with trigger()'s **kwargs
    priority: int = 0               # Higher = runs first
    enabled: bool = True            # Disabled hooks are skipped by trigger()
    description: str = ""           # Human-readable label used in error logs


class ContextHooks:
    """
    Manages hooks for automatic context injection.

    Hooks allow automatic context loading at key points:
    - on_task_start: Load relevant context when starting a task
    - on_task_complete: Store learnings back to memory
    - context_refresh: Update stale context

    Hooks of the same type run in descending priority order; a failing
    hook is logged and skipped so later hooks still execute.
    """

    def __init__(self, memory_loader: Optional[MemoryContextLoader] = None):
        """
        Args:
            memory_loader: Loader used by the default hooks; a fresh
                MemoryContextLoader is created when omitted.
        """
        # One priority-ordered hook list per hook type.
        self.hooks: Dict[ContextHookType, List[ContextHook]] = {
            hook_type: [] for hook_type in ContextHookType
        }
        self.memory_loader = memory_loader or MemoryContextLoader()
        self._register_default_hooks()

    def _register_default_hooks(self):
        """Register default context hooks."""
        # Task start: Load relevant context
        self.register(ContextHook(
            hook_type=ContextHookType.TASK_START,
            callback=self._default_task_start,
            priority=10,
            description="Load relevant context from memory"
        ))

        # Task complete: Store learnings
        self.register(ContextHook(
            hook_type=ContextHookType.TASK_COMPLETE,
            callback=self._default_task_complete,
            priority=10,
            description="Store task learnings to memory"
        ))

        # Context refresh: Update stale context
        self.register(ContextHook(
            hook_type=ContextHookType.CONTEXT_REFRESH,
            callback=self._default_context_refresh,
            priority=10,
            description="Refresh stale context items"
        ))

    def register(self, hook: ContextHook):
        """Register a new hook and keep its type's list priority-sorted."""
        self.hooks[hook.hook_type].append(hook)
        # Sort by priority (higher first)
        self.hooks[hook.hook_type].sort(key=lambda h: h.priority, reverse=True)

    def unregister(self, hook_type: ContextHookType, callback: Callable):
        """Unregister every hook of ``hook_type`` that uses ``callback``."""
        self.hooks[hook_type] = [
            h for h in self.hooks[hook_type]
            if h.callback != callback
        ]

    def trigger(self, hook_type: ContextHookType, **kwargs) -> List[Any]:
        """
        Trigger all hooks of a type.

        Args:
            hook_type: Type of hook to trigger
            **kwargs: Arguments to pass to hook callbacks

        Returns:
            List of results from hook callbacks. A failing hook
            contributes nothing; the error is logged and later hooks
            still run.
        """
        results = []

        for hook in self.hooks[hook_type]:
            if not hook.enabled:
                continue

            try:
                result = hook.callback(**kwargs)
                results.append(result)
            except Exception as e:
                # Isolate hook failures so one bad hook can't block the rest.
                logger.error(f"Hook {hook.description} failed: {e}")

        return results

    def on_task_start(self, task: str, **kwargs) -> ContextBundle:
        """
        Hook: Called when starting a new task.

        Automatically loads relevant context from memory by merging
        whatever the TASK_START hooks return (ContextBundle or single
        ContextItem results; anything else is ignored).
        """
        results = self.trigger(ContextHookType.TASK_START, task=task, **kwargs)

        # Merge context bundles
        merged = ContextBundle(query=task)
        for result in results:
            if isinstance(result, ContextBundle):
                for item in result.items:
                    merged.add(item)
            elif isinstance(result, ContextItem):
                merged.add(result)

        return merged

    def on_task_complete(self, task: str, result: str,
                         learnings: Optional[List[str]] = None, **kwargs):
        """
        Hook: Called when completing a task.

        Stores learnings back to memory via the TASK_COMPLETE hooks.
        """
        return self.trigger(
            ContextHookType.TASK_COMPLETE,
            task=task,
            result=result,
            learnings=learnings or [],
            **kwargs
        )

    def context_refresh(self, context: ContextBundle, max_age_hours: float = 24,
                        **kwargs) -> ContextBundle:
        """
        Hook: Refresh stale context items.

        Updates items older than max_age_hours. Returns the first
        ContextBundle produced by a CONTEXT_REFRESH hook, or the
        original ``context`` when no hook returned one.
        """
        results = self.trigger(
            ContextHookType.CONTEXT_REFRESH,
            context=context,
            max_age_hours=max_age_hours,
            **kwargs
        )

        # Return first ContextBundle result or original
        for result in results:
            if isinstance(result, ContextBundle):
                return result
        return context

    # Default hook implementations

    def _default_task_start(self, task: str, **kwargs) -> ContextBundle:
        """Default task start hook: load relevant context."""
        return self.memory_loader.load_relevant_context(task)

    def _default_task_complete(self, task: str, result: str,
                               learnings: List[str], **kwargs) -> Dict:
        """Default task complete hook: store learnings.

        Returns:
            Dict with a "stored" list naming what was persisted (empty
            when the memory system could not be imported).
        """
        stored = []

        # Import memory system for storing. Uses the module-level `sys`
        # (the previous function-local `import sys` was redundant) and
        # extends sys.path only once instead of on every completion.
        try:
            genesis_dir = str(GENESIS_PATH)
            if genesis_dir not in sys.path:
                sys.path.insert(0, genesis_dir)
            from surprise_memory import MemorySystem

            memory = MemorySystem()

            # Store task completion as episodic memory
            memory.process(
                content=f"Completed task: {task}\nResult: {result}",
                source="context_hook",
                domain="task_completion",
                metadata={"task": task, "result": result}
            )
            stored.append("task_completion")

            # Store individual learnings
            for learning in learnings:
                memory.process(
                    content=learning,
                    source="context_hook",
                    domain="learning",
                    metadata={"source_task": task}
                )
                stored.append(learning[:50])

        except ImportError:
            logger.warning("Could not import memory system")
        except Exception as e:
            logger.error(f"Error storing learnings: {e}")

        return {"stored": stored}

    def _default_context_refresh(self, context: ContextBundle,
                                  max_age_hours: float, **kwargs) -> ContextBundle:
        """Default context refresh hook.

        Keeps fresh items, tries to reload stale ones from episodic
        memory, and otherwise keeps the stale item with demoted priority
        and a 0.8x relevance penalty.
        """
        refreshed = ContextBundle(query=context.query)
        now = datetime.now()

        for item in context.items:
            # Check age
            try:
                item_time = datetime.fromisoformat(item.timestamp)
                age_hours = (now - item_time).total_seconds() / 3600

                if age_hours < max_age_hours:
                    # Keep fresh items
                    refreshed.add(item)
                else:
                    # Try to refresh stale items from episodic memory,
                    # keyed on a prefix of the stale content.
                    fresh_items = self.memory_loader._load_from_episodic(
                        item.content[:100], limit=1
                    )
                    if fresh_items:
                        refreshed.add(fresh_items[0])
                    else:
                        # Keep stale item with reduced priority; demotion
                        # floors at value 1 (assumes 1 is the lowest
                        # ContextPriority value -- TODO confirm).
                        stale_item = ContextItem(
                            content=item.content,
                            source=item.source,
                            priority=ContextPriority(max(1, item.priority.value - 1)),
                            relevance_score=item.relevance_score * 0.8,
                            timestamp=item.timestamp,
                            metadata={**item.metadata, "stale": True}
                        )
                        refreshed.add(stale_item)
            except (ValueError, TypeError):
                # Can't parse timestamp, keep item as-is
                refreshed.add(item)

        return refreshed


# =============================================================================
# Sub-Agent Delegation
# =============================================================================

class SubAgentResult:
    """Result from a sub-agent task.

    Captures the (possibly compressed) output of a sub-agent together
    with bookkeeping data: token usage, success flag, free-form metadata,
    and an ISO-format creation timestamp.
    """

    def __init__(self, task: str, result: str, tokens_used: int = 0,
                 success: bool = True, metadata: Dict = None):
        self.task = task
        self.result = result
        self.tokens_used = tokens_used
        self.success = success
        self.metadata = {} if metadata is None else metadata
        self.timestamp = datetime.now().isoformat()

    def to_context_item(self, max_tokens: int = 1000) -> ContextItem:
        """Convert to ContextItem for context injection.

        The result text is compressed when its estimated token count
        exceeds ``max_tokens``. Failed results are demoted to MEDIUM
        priority with a lower relevance score.
        """
        budget = TokenBudgetManager()
        content = self.result

        # Compress result if it would blow the token allowance
        if budget.estimate_tokens(content) > max_tokens:
            content = ContextCompressor(budget).compress(content, max_tokens)

        if self.success:
            priority, relevance = ContextPriority.HIGH, 0.9
        else:
            priority, relevance = ContextPriority.MEDIUM, 0.5

        # Caller-supplied metadata wins on key collisions with task/success
        merged_meta = {"task": self.task, "success": self.success}
        merged_meta.update(self.metadata)

        return ContextItem(
            content=content,
            source=f"sub_agent:{self.metadata.get('agent_type', 'generic')}",
            priority=priority,
            relevance_score=relevance,
            timestamp=self.timestamp,
            metadata=merged_meta
        )


class SubAgentDelegator:
    """
    Manages sub-agent delegation for context-heavy tasks.

    Pattern: Orchestrator delegates to sub-agents, each with clean context.
    Sub-agents return condensed results that are integrated into the
    orchestrator's context.
    """

    def __init__(self, budget_manager: Optional[TokenBudgetManager] = None,
                 max_sub_agent_tokens: int = 50000):
        """
        Args:
            budget_manager: Token estimator; a default TokenBudgetManager
                is created when omitted.
            max_sub_agent_tokens: Total token budget for one sub-agent call.
        """
        self.budget = budget_manager or TokenBudgetManager()
        self.max_sub_agent_tokens = max_sub_agent_tokens
        self.compressor = ContextCompressor(self.budget)
        # Processed results keyed by a short md5 hash of the task text.
        self._results_cache: Dict[str, SubAgentResult] = {}

    def prepare_sub_agent_context(self, task: str,
                                   relevant_context: ContextBundle,
                                   system_prompt: str = "") -> Dict[str, Any]:
        """
        Prepare context for a sub-agent task.

        Creates a clean, focused context for the sub-agent.

        Args:
            task: Task for the sub-agent
            relevant_context: Relevant context to include
            system_prompt: Optional system prompt additions

        Returns:
            Dictionary with prepared context
        """
        # Allocate token budget
        system_budget = self.budget.estimate_tokens(system_prompt) if system_prompt else 0
        # Reserve 1000 tokens for the task text itself; clamp at zero so
        # an oversized system prompt can't yield a negative budget.
        context_budget = max(0, self.max_sub_agent_tokens - system_budget - 1000)

        # Compress context to fit
        compressed_context = self.compressor.compress_bundle(relevant_context, context_budget)

        return {
            "task": task,
            "system_prompt": system_prompt,
            "context": compressed_context.to_text(),
            "context_tokens": compressed_context.total_tokens,
            "total_budget": self.max_sub_agent_tokens,
            "remaining_for_response": self.max_sub_agent_tokens - system_budget - compressed_context.total_tokens
        }

    def process_sub_agent_result(self, task: str, raw_result: str,
                                  success: bool = True,
                                  agent_type: str = "generic",
                                  summary_tokens: int = 1500) -> SubAgentResult:
        """
        Process and compress a sub-agent result.

        Takes the raw output and creates a condensed summary for
        integration into the orchestrator's context.

        Args:
            task: Original task
            raw_result: Full result from sub-agent
            success: Whether task succeeded
            agent_type: Type of sub-agent
            summary_tokens: Target tokens for summary

        Returns:
            SubAgentResult with compressed result
        """
        raw_tokens = self.budget.estimate_tokens(raw_result)

        # Compress only when the raw output exceeds the summary target
        if raw_tokens > summary_tokens:
            compressed = self.compressor.compress(raw_result, summary_tokens, strategy="summarize")
        else:
            compressed = raw_result

        result = SubAgentResult(
            task=task,
            result=compressed,
            tokens_used=raw_tokens,
            success=success,
            metadata={
                "agent_type": agent_type,
                "original_tokens": raw_tokens,
                "compressed_tokens": self.budget.estimate_tokens(compressed)
            }
        )

        # Cache result (short hash is an internal key; collisions just
        # overwrite an older cached entry)
        task_hash = hashlib.md5(task.encode()).hexdigest()[:8]
        self._results_cache[task_hash] = result

        return result

    def merge_sub_agent_results(self, results: List[SubAgentResult],
                                 max_tokens: int = 3000) -> ContextBundle:
        """
        Merge multiple sub-agent results into a single context bundle.

        Args:
            results: List of sub-agent results
            max_tokens: Maximum tokens for merged result

        Returns:
            ContextBundle with merged results (successful results first)
        """
        bundle = ContextBundle(query="Sub-agent results")

        # Sort by success (True first) and convert to context items
        sorted_results = sorted(results, key=lambda r: r.success, reverse=True)

        # Split the budget evenly across results; guard against empty input
        tokens_per_result = max_tokens // max(len(results), 1)

        for result in sorted_results:
            item = result.to_context_item(max_tokens=tokens_per_result)
            bundle.add(item)

        # Compress if over budget
        if bundle.total_tokens > max_tokens:
            bundle = self.compressor.compress_bundle(bundle, max_tokens)

        return bundle


# =============================================================================
# Main Context Manager
# =============================================================================

class ContextManager:
    """
    Main context engineering manager for Genesis.

    Provides unified interface for:
    - Loading relevant context from memory
    - Progressive disclosure of information
    - Context compression and token management
    - Automatic context injection via hooks
    - Sub-agent delegation support

    Usage:
        ctx = ContextManager()

        # Load context for a task
        context = ctx.load_relevant_context("implement authentication")

        # Progressive disclosure
        disclosed = ctx.progressive_disclosure(context, depth=1)

        # Inject into prompt
        prompt = ctx.inject_context("User request here", disclosed)
    """

    def __init__(self, model: str = "default",
                 enable_hooks: bool = True,
                 genesis_path: Path = GENESIS_PATH):
        """
        Initialize the Context Manager.

        Args:
            model: Model name for token budget calculation
            enable_hooks: Whether to enable automatic hooks
            genesis_path: Path to Genesis system directory
        """
        self.genesis_path = genesis_path

        # Initialize components
        self.budget = TokenBudgetManager(model=model)
        self.memory_loader = MemoryContextLoader(genesis_path)
        self.compressor = ContextCompressor(self.budget)
        self.disclosure = ProgressiveDisclosure(self.budget)
        self.scorer = ContextRelevanceScorer()
        self.delegator = SubAgentDelegator(self.budget)

        # Initialize hooks if enabled (None disables all hook behavior)
        self.hooks = ContextHooks(self.memory_loader) if enable_hooks else None

        # Track current context state
        self._current_context: Optional[ContextBundle] = None
        # Unbounded history of every loaded bundle (grows per load call)
        self._context_history: List[ContextBundle] = []

    def load_relevant_context(self, query: str,
                               sources: Optional[List[str]] = None,
                               max_items: int = 15,
                               use_hooks: bool = True) -> ContextBundle:
        """
        Load relevant context for a query.

        Combines memory search with hook-based augmentation.

        Args:
            query: Query or task description
            sources: Memory sources to query
            max_items: Maximum context items
            use_hooks: Whether to trigger hooks

        Returns:
            ContextBundle with relevant context
        """
        # Use hooks if available and enabled
        # NOTE(review): the hook path ignores `sources` and `max_items`;
        # the default TASK_START hook applies its own defaults -- confirm
        # this is intended.
        if use_hooks and self.hooks:
            context = self.hooks.on_task_start(query)
        else:
            context = self.memory_loader.load_relevant_context(query, sources, max_items)

        # Score and sort by relevance
        for item in context.items:
            # 0.5 is treated as the "unscored" sentinel -- presumably the
            # ContextItem default; items with any other score keep it.
            if item.relevance_score == 0.5:  # Default score
                item.relevance_score = self.scorer.score(
                    query, item.content, item.metadata
                )

        # Store current context
        self._current_context = context
        self._context_history.append(context)

        return context

    def progressive_disclosure(self, context: ContextBundle,
                                depth: int = 1,
                                budget: Optional[int] = None) -> ContextBundle:
        """
        Apply progressive disclosure to context.

        Depth levels:
        - 0: Overview/summary only
        - 1: Essential information
        - 2: Detailed context
        - 3: Comprehensive (everything)

        Args:
            context: Context to disclose
            depth: Disclosure depth (0-3)
            budget: Optional token budget override

        Returns:
            Disclosed ContextBundle
        """
        return self.disclosure.disclose(context, depth, budget)

    def compress_context(self, context: Union[ContextBundle, str],
                         target_tokens: int,
                         strategy: str = "hybrid") -> Union[ContextBundle, str]:
        """
        Compress context to fit within token budget.

        Args:
            context: Context to compress
            target_tokens: Target token count
            strategy: Compression strategy (compact, summarize, hybrid);
                only used for plain-string input -- bundles go through
                compress_bundle, which takes no strategy argument.

        Returns:
            Compressed context (same type as the input)
        """
        if isinstance(context, ContextBundle):
            return self.compressor.compress_bundle(context, target_tokens)
        else:
            return self.compressor.compress(context, target_tokens, strategy)

    def inject_context(self, prompt: str, context: Union[ContextBundle, str, None] = None,
                       position: str = "before",
                       max_context_tokens: Optional[int] = None) -> str:
        """
        Inject context into a prompt.

        Args:
            prompt: User prompt
            context: Context to inject (uses current if None)
            position: Where to inject (before, after, wrapped);
                unrecognized values fall back to "before" formatting
            max_context_tokens: Maximum tokens for context

        Returns:
            Prompt with injected context (the prompt unchanged when no
            context is available at all)
        """
        # Use current context if none provided
        if context is None:
            context = self._current_context

        if context is None:
            return prompt

        # Convert to text if needed
        if isinstance(context, ContextBundle):
            context_text = context.to_text(include_sources=True)
        else:
            context_text = context

        # Apply token limit if specified
        if max_context_tokens:
            current_tokens = self.budget.estimate_tokens(context_text)
            if current_tokens > max_context_tokens:
                context_text = self.budget.trim_to_budget(context_text, max_context_tokens)

        # Inject based on position
        if position == "before":
            return f"<context>\n{context_text}\n</context>\n\n{prompt}"
        elif position == "after":
            return f"{prompt}\n\n<context>\n{context_text}\n</context>"
        elif position == "wrapped":
            return f"<context>\n{context_text}\n</context>\n\n<task>\n{prompt}\n</task>"
        else:
            # Unknown position: same layout as "before"
            return f"<context>\n{context_text}\n</context>\n\n{prompt}"

    def estimate_tokens(self, text: str, content_type: str = "mixed") -> int:
        """
        Estimate token count for text.

        Args:
            text: Text to estimate
            content_type: Type of content (code, prose, json, mixed)

        Returns:
            Estimated token count
        """
        return self.budget.estimate_tokens(text, content_type)

    def trim_to_budget(self, context: str, budget: int,
                       preserve_start: bool = True,
                       content_type: str = "mixed") -> str:
        """
        Trim context to fit within token budget.

        Args:
            context: Context to trim
            budget: Maximum tokens
            preserve_start: Keep beginning (vs end)
            content_type: Type of content

        Returns:
            Trimmed context
        """
        return self.budget.trim_to_budget(context, budget, preserve_start, content_type)

    def prioritize_context(self, contexts: List[Tuple[str, Dict]],
                           query: str) -> List[Tuple[str, Dict, float]]:
        """
        Rank contexts by relevance to query.

        Args:
            contexts: List of (content, metadata) tuples
            query: Query to score against

        Returns:
            Sorted list of (content, metadata, score)
        """
        return self.scorer.prioritize_contexts(query, contexts)

    def delegate_to_sub_agent(self, task: str,
                               context: Optional[ContextBundle] = None,
                               system_prompt: str = "") -> Dict[str, Any]:
        """
        Prepare context for sub-agent delegation.

        Args:
            task: Task for sub-agent
            context: Context to provide (uses current if None; an empty
                bundle when there is no current context either)
            system_prompt: Optional system prompt

        Returns:
            Prepared sub-agent context
        """
        context = context or self._current_context or ContextBundle()
        return self.delegator.prepare_sub_agent_context(task, context, system_prompt)

    def process_sub_agent_result(self, task: str, result: str,
                                  success: bool = True,
                                  agent_type: str = "generic") -> SubAgentResult:
        """
        Process a sub-agent result for context integration.

        Args:
            task: Original task
            result: Raw result from sub-agent
            success: Whether task succeeded
            agent_type: Type of sub-agent

        Returns:
            Processed SubAgentResult
        """
        return self.delegator.process_sub_agent_result(
            task, result, success, agent_type
        )

    def on_task_complete(self, task: str, result: str, learnings: Optional[List[str]] = None):
        """
        Signal task completion to store learnings.

        No-op when hooks are disabled.

        Args:
            task: Completed task
            result: Task result
            learnings: List of learnings from task
        """
        if self.hooks:
            self.hooks.on_task_complete(task, result, learnings)

    def refresh_context(self, max_age_hours: float = 24) -> Optional[ContextBundle]:
        """
        Refresh stale context items.

        Args:
            max_age_hours: Maximum age before refresh

        Returns:
            Refreshed context bundle, or None when no context has been
            loaded yet (nothing to refresh)
        """
        if self._current_context and self.hooks:
            self._current_context = self.hooks.context_refresh(
                self._current_context, max_age_hours
            )
        return self._current_context

    def get_stats(self) -> Dict[str, Any]:
        """Get context manager statistics.

        Returns a dict with budget stats, current-context size, history
        length, and (when hooks are enabled) per-type hook counts.
        """
        stats = {
            "budget": self.budget.get_stats(),
            "current_context": {
                "items": len(self._current_context.items) if self._current_context else 0,
                "tokens": self._current_context.total_tokens if self._current_context else 0
            },
            "context_history_size": len(self._context_history),
            "hooks_enabled": self.hooks is not None
        }

        if self.hooks:
            stats["hooks"] = {
                hook_type.value: len(hooks)
                for hook_type, hooks in self.hooks.hooks.items()
            }

        return stats

    def clear(self):
        """Clear current context and reset the token budget.

        NOTE(review): _context_history is intentionally left intact here
        (get_stats still reports it) -- confirm that is the desired
        behavior.
        """
        self._current_context = None
        self.budget.reset()


# =============================================================================
# CLI Interface
# =============================================================================

def main():
    """CLI interface for context engineering skill.

    Parses sys.argv and dispatches to one of: load, disclose, compress,
    inject, stats, estimate.

    Returns:
        Process exit code: 0 on success, 1 when a required argument for
        the chosen command is missing.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Genesis Context Engineering Skill"
    )
    parser.add_argument("command", choices=[
        "load", "disclose", "compress", "inject", "stats", "estimate"
    ], help="Command to execute")
    parser.add_argument("--query", "-q", help="Query for context loading")
    parser.add_argument("--text", "-t", help="Text for operations")
    parser.add_argument("--depth", "-d", type=int, default=1,
                       help="Disclosure depth (0-3)")
    parser.add_argument("--budget", "-b", type=int, help="Token budget")
    parser.add_argument("--model", "-m", default="default",
                       help="Model for token limits")

    args = parser.parse_args()

    # Initialize context manager
    ctx = ContextManager(model=args.model)

    if args.command == "load":
        if not args.query:
            # Usage errors go to stderr so piped stdout stays clean and
            # the nonzero exit code matches the channel convention.
            print("Error: --query required for load command", file=sys.stderr)
            return 1

        context = ctx.load_relevant_context(args.query)
        print(f"\nLoaded {len(context.items)} context items ({context.total_tokens} tokens)")
        print("\nTop items:")
        for item in sorted(context.items, key=lambda x: x.relevance_score, reverse=True)[:5]:
            print(f"  [{item.priority.name}] {item.source} (relevance: {item.relevance_score:.2f})")
            print(f"    {item.content[:100]}...")

    elif args.command == "disclose":
        if not args.query:
            print("Error: --query required for disclose command", file=sys.stderr)
            return 1

        context = ctx.load_relevant_context(args.query)
        disclosed = ctx.progressive_disclosure(context, depth=args.depth)
        print(f"\nDisclosed at depth {args.depth}: {len(disclosed.items)} items ({disclosed.total_tokens} tokens)")
        print("\n" + disclosed.to_text(include_sources=True)[:1000])

    elif args.command == "compress":
        text = args.text or "Sample text for compression"
        budget = args.budget or 100

        compressed = ctx.compress_context(text, budget)
        print(f"\nOriginal tokens: ~{ctx.estimate_tokens(text)}")
        print(f"Compressed tokens: ~{ctx.estimate_tokens(compressed)}")
        print(f"\nCompressed:\n{compressed}")

    elif args.command == "inject":
        if not args.query or not args.text:
            print("Error: --query and --text required for inject command", file=sys.stderr)
            return 1

        context = ctx.load_relevant_context(args.query)
        disclosed = ctx.progressive_disclosure(context, depth=args.depth)
        injected = ctx.inject_context(args.text, disclosed)
        print(f"\nInjected prompt ({ctx.estimate_tokens(injected)} tokens):")
        print(injected[:2000])

    elif args.command == "estimate":
        text = args.text or "Sample text for estimation"
        tokens = ctx.estimate_tokens(text)
        print(f"\nText: {text[:100]}...")
        print(f"Estimated tokens: {tokens}")

    elif args.command == "stats":
        stats = ctx.get_stats()
        print("\nContext Manager Statistics:")
        print(json.dumps(stats, indent=2))

    return 0


if __name__ == "__main__":
    # sys.exit() instead of the builtin exit(): the builtin is an
    # interactive helper installed by the `site` module and may be absent
    # when scripts run with -S; sys.exit is the supported API.
    sys.exit(main())
