"""
AIVA Procedural Memory System
=============================
A comprehensive procedural memory system for learning, chunking, automatizing,
and transferring skills across domains.

This module implements cognitive science principles for procedural knowledge:
- Skill acquisition through practice and feedback
- Chunking to combine steps into efficient routines
- Automatization tracking as skills become reflexive
- Motor program execution for action sequences
- Error correction and learning from mistakes
- Transfer learning to apply skills in new domains

Author: Genesis Lead Architect
Version: 1.0.0
"""

import json
import time
import uuid
import logging
import hashlib
import statistics
from enum import Enum
from datetime import datetime, timedelta
from dataclasses import dataclass, field, asdict
from typing import (
    Dict, List, Optional, Tuple, Any, Callable,
    Set, Union, TypeVar, Generic
)
from collections import defaultdict
from abc import ABC, abstractmethod

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("AIVA.ProceduralMemory")


# =============================================================================
# ENUMS AND DATA CLASSES
# =============================================================================

class SkillStage(Enum):
    """Fitts and Posner's stages of motor learning.

    Skills progress COGNITIVE -> ASSOCIATIVE -> AUTONOMOUS as proficiency
    crosses the thresholds in SkillAcquisition (0.4 / 0.8), and may regress
    when proficiency decays (see SkillAcquisition.decay_skills).
    """
    COGNITIVE = "cognitive"        # Conscious, deliberate, error-prone
    ASSOCIATIVE = "associative"    # Fewer errors, more fluid
    AUTONOMOUS = "autonomous"      # Automatic, requires little attention


class ChunkType(Enum):
    """Types of procedural chunks.

    CONDITIONAL chunks carry a Chunk.condition expression; LOOP chunks carry
    Chunk.loop_count; COMPOSITE chunks nest other chunks via Chunk.sub_chunks
    (see ChunkingEngine.create_composite_chunk).
    """
    SEQUENCE = "sequence"          # Ordered series of steps
    PARALLEL = "parallel"          # Steps that can execute simultaneously
    CONDITIONAL = "conditional"    # Branch based on conditions
    LOOP = "loop"                  # Repeated execution
    COMPOSITE = "composite"        # Nested chunks


class ErrorCategory(Enum):
    """Categories of procedural errors, used to classify ErrorRecord entries."""
    OMISSION = "omission"          # Missing step
    COMMISSION = "commission"      # Wrong action
    SEQUENCE = "sequence"          # Wrong order
    TIMING = "timing"              # Incorrect timing
    PRECISION = "precision"        # Insufficient accuracy
    CONTEXT = "context"            # Wrong context application


class TransferType(Enum):
    """Types of skill transfer.

    NOTE(review): not referenced anywhere in this section of the module;
    presumably consumed by a transfer-learning component defined elsewhere.
    """
    NEAR = "near"                  # Similar tasks/domains
    FAR = "far"                    # Dissimilar tasks/domains
    POSITIVE = "positive"          # Previous skill helps
    NEGATIVE = "negative"          # Previous skill interferes
    ZERO = "zero"                  # No transfer effect


@dataclass
class ProcedureStep:
    """One atomic action within a procedure.

    Combines the declarative description of an action (name, parameters,
    pre/postconditions) with running execution statistics
    (error_rate, execution_count).
    """
    step_id: str
    name: str
    action: str
    parameters: Dict[str, Any] = field(default_factory=dict)
    preconditions: List[str] = field(default_factory=list)
    postconditions: List[str] = field(default_factory=list)
    expected_duration_ms: int = 1000
    attention_required: float = 1.0  # fraction of attention needed, 0.0 to 1.0
    error_rate: float = 0.0
    execution_count: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this step to a plain dictionary."""
        serialized = asdict(self)
        return serialized

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ProcedureStep':
        """Rebuild a ProcedureStep from a dictionary produced by to_dict()."""
        return cls(**data)


@dataclass
class Skill:
    """Represents a learned skill.

    Tracks the procedural content (steps, applied chunk IDs) alongside
    learning state: Fitts/Posner stage, proficiency in [0.0, 1.0],
    practice counts/time, error and transfer history, and per-day decay.
    """
    skill_id: str
    name: str
    description: str
    domain: str
    steps: List[ProcedureStep] = field(default_factory=list)
    chunks: List[str] = field(default_factory=list)  # Chunk IDs
    stage: SkillStage = SkillStage.COGNITIVE
    proficiency: float = 0.0  # 0.0 to 1.0
    practice_count: int = 0
    total_practice_time_ms: int = 0
    error_history: List[Dict[str, Any]] = field(default_factory=list)
    acquisition_date: str = field(default_factory=lambda: datetime.now().isoformat())
    last_practiced: str = field(default_factory=lambda: datetime.now().isoformat())
    decay_rate: float = 0.01  # Per day
    transfer_history: List[Dict[str, str]] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serializable dictionary (enum flattened to value)."""
        data = asdict(self)
        data['stage'] = self.stage.value
        data['steps'] = [s.to_dict() if isinstance(s, ProcedureStep) else s for s in self.steps]
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Skill':
        """Create a Skill from a dictionary produced by to_dict().

        Fix: work on a shallow copy so the caller's dict is not mutated
        (previously 'stage' and 'steps' were overwritten in place).
        """
        data = dict(data)
        data['stage'] = SkillStage(data['stage'])
        data['steps'] = [ProcedureStep.from_dict(s) if isinstance(s, dict) else s for s in data.get('steps', [])]
        return cls(**data)


@dataclass
class Chunk:
    """A chunked procedure - multiple steps combined into one unit.

    Depending on chunk_type, extra fields apply: 'condition' for
    CONDITIONAL chunks, 'loop_count' for LOOP chunks, and 'sub_chunks'
    for COMPOSITE chunks nesting other chunk IDs.
    """
    chunk_id: str
    name: str
    chunk_type: ChunkType
    component_steps: List[str] = field(default_factory=list)  # Step IDs
    sub_chunks: List[str] = field(default_factory=list)  # Nested chunk IDs
    condition: Optional[str] = None  # For conditional chunks
    loop_count: Optional[int] = None  # For loop chunks
    creation_date: str = field(default_factory=lambda: datetime.now().isoformat())
    execution_count: int = 0
    average_duration_ms: int = 0
    success_rate: float = 1.0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serializable dictionary (enum flattened to value)."""
        data = asdict(self)
        data['chunk_type'] = self.chunk_type.value
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Chunk':
        """Create a Chunk from a dictionary produced by to_dict().

        Fix: work on a shallow copy so the caller's dict is not mutated
        (previously 'chunk_type' was overwritten in place).
        """
        data = dict(data)
        data['chunk_type'] = ChunkType(data['chunk_type'])
        return cls(**data)


@dataclass
class ExecutionTrace:
    """Record of one skill/procedure execution run.

    Captures ISO-format start/end timestamps, the per-step execution log,
    any errors raised, overall success, attention load, and free-form
    execution context.
    """
    trace_id: str
    skill_id: str
    start_time: str
    end_time: Optional[str] = None
    steps_executed: List[Dict[str, Any]] = field(default_factory=list)
    errors: List[Dict[str, Any]] = field(default_factory=list)
    success: bool = False
    attention_load: float = 0.0
    context: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the trace to a plain dictionary."""
        return asdict(self)


@dataclass
class ErrorRecord:
    """Detailed error record for learning.

    'learned' flags whether a corrective action has been internalized;
    'corrective_action' describes the remedy, when known.
    """
    error_id: str
    skill_id: str
    step_id: str
    category: ErrorCategory
    description: str
    context: Dict[str, Any]
    timestamp: str
    corrective_action: Optional[str] = None
    learned: bool = False

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serializable dictionary (enum flattened to value)."""
        data = asdict(self)
        data['category'] = self.category.value
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ErrorRecord':
        """Create an ErrorRecord from a dictionary produced by to_dict().

        Added for round-trip symmetry with Skill/Chunk. Operates on a
        shallow copy so the caller's dict is not mutated.
        """
        data = dict(data)
        data['category'] = ErrorCategory(data['category'])
        return cls(**data)


# =============================================================================
# SKILL ACQUISITION ENGINE
# =============================================================================

class SkillAcquisition:
    """
    Engine for acquiring new procedural skills.

    Implements the cognitive-associative-autonomous progression
    based on practice, feedback, and error correction.

    Holds two registries: ``skills`` (skill_id -> Skill) and
    ``steps_library`` (step_id -> ProcedureStep). Proficiency is a
    weighted blend of practice volume, success rate, and timing
    consistency, smoothed across practice sessions.
    """

    def __init__(self, storage_backend: Optional[Any] = None):
        """
        Initialize the skill acquisition system.

        Args:
            storage_backend: Optional persistent storage (Redis, PostgreSQL, etc.)
                NOTE(review): stored on self.storage but never read or written
                in this section of the module -- confirm persistence wiring.
        """
        self.skills: Dict[str, Skill] = {}
        self.steps_library: Dict[str, ProcedureStep] = {}
        self.storage = storage_backend

        # Learning parameters (weights sum to 1.0 so proficiency stays in [0, 1]
        # when all component scores are in [0, 1])
        self.practice_weight = 0.3        # Weight of practice in proficiency
        self.success_weight = 0.4         # Weight of success rate
        self.consistency_weight = 0.3     # Weight of timing consistency

        # Stage transition thresholds
        self.associative_threshold = 0.4  # Proficiency to enter associative
        self.autonomous_threshold = 0.8   # Proficiency to enter autonomous

        logger.info("SkillAcquisition engine initialized")

    def define_step(
        self,
        name: str,
        action: str,
        parameters: Optional[Dict[str, Any]] = None,
        preconditions: Optional[List[str]] = None,
        postconditions: Optional[List[str]] = None,
        expected_duration_ms: int = 1000,
        attention_required: float = 1.0
    ) -> ProcedureStep:
        """
        Define a new procedure step.

        The step is assigned a fresh random ID and registered in
        ``steps_library``.

        Args:
            name: Human-readable step name
            action: The action to perform (function name, API call, etc.)
            parameters: Parameters for the action
            preconditions: Conditions that must be true before execution
            postconditions: Conditions expected to be true after execution
            expected_duration_ms: Expected execution time
            attention_required: Attention needed (0.0-1.0)

        Returns:
            The created ProcedureStep
        """
        step_id = f"step_{uuid.uuid4().hex[:12]}"

        step = ProcedureStep(
            step_id=step_id,
            name=name,
            action=action,
            parameters=parameters or {},
            preconditions=preconditions or [],
            postconditions=postconditions or [],
            expected_duration_ms=expected_duration_ms,
            attention_required=attention_required
        )

        self.steps_library[step_id] = step
        logger.info(f"Defined step: {name} ({step_id})")
        return step

    def learn_skill(
        self,
        name: str,
        description: str,
        domain: str,
        steps: List[ProcedureStep],
        metadata: Optional[Dict[str, Any]] = None
    ) -> Skill:
        """
        Learn a new skill from a sequence of steps.

        Steps not already in ``steps_library`` are registered as a side
        effect, so chunking and duration lookups work for them later.

        Args:
            name: Skill name
            description: What the skill accomplishes
            domain: The domain this skill belongs to
            steps: Ordered list of procedure steps
            metadata: Additional skill metadata

        Returns:
            The created Skill (starts in COGNITIVE stage, proficiency 0.0)
        """
        skill_id = f"skill_{uuid.uuid4().hex[:12]}"

        # Store steps in library if not already present
        for step in steps:
            if step.step_id not in self.steps_library:
                self.steps_library[step.step_id] = step

        skill = Skill(
            skill_id=skill_id,
            name=name,
            description=description,
            domain=domain,
            steps=steps,
            metadata=metadata or {}
        )

        self.skills[skill_id] = skill
        logger.info(f"Learned new skill: {name} ({skill_id}) in domain '{domain}'")
        return skill

    def practice_skill(
        self,
        skill_id: str,
        execution_trace: ExecutionTrace
    ) -> Dict[str, Any]:
        """
        Record practice of a skill and update proficiency.

        Proficiency is a weighted blend of practice volume (capped at 100
        sessions), per-trace success rate, and timing consistency, applied
        via an exponential moving average (0.9 old / 0.1 new) so one
        session cannot swing proficiency sharply. The skill's stage is
        re-derived from the updated proficiency.

        Args:
            skill_id: ID of the skill practiced
            execution_trace: The execution record

        Returns:
            Dictionary with updated skill metrics (proficiency, delta,
            stage, success rate, duration, error count)

        Raises:
            ValueError: If skill_id is not registered.
        """
        if skill_id not in self.skills:
            raise ValueError(f"Unknown skill: {skill_id}")

        skill = self.skills[skill_id]

        # Update practice statistics
        skill.practice_count += 1

        # Calculate execution duration; an open trace (no end_time) is
        # treated as ending now
        start = datetime.fromisoformat(execution_trace.start_time)
        end = datetime.fromisoformat(execution_trace.end_time) if execution_trace.end_time else datetime.now()
        duration_ms = int((end - start).total_seconds() * 1000)
        skill.total_practice_time_ms += duration_ms

        # Calculate success rate
        # NOTE(review): if the trace records more errors than executed steps,
        # success_rate goes negative and drags proficiency down -- confirm
        # whether it should be clamped to 0.
        error_count = len(execution_trace.errors)
        step_count = len(execution_trace.steps_executed)
        success_rate = (step_count - error_count) / max(step_count, 1)

        # Calculate timing consistency: 1.0 when at or faster than the
        # expected total duration, shrinking as execution runs slower
        expected_duration = sum(s.expected_duration_ms for s in skill.steps)
        timing_ratio = min(expected_duration / max(duration_ms, 1), 1.0)

        # Update proficiency using weighted formula
        old_proficiency = skill.proficiency
        practice_factor = min(skill.practice_count / 100, 1.0)  # Caps at 100 practices

        new_proficiency = (
            self.practice_weight * practice_factor +
            self.success_weight * success_rate +
            self.consistency_weight * timing_ratio
        )

        # Smooth update to avoid sudden jumps
        skill.proficiency = 0.9 * skill.proficiency + 0.1 * new_proficiency
        skill.proficiency = min(skill.proficiency, 1.0)

        # Update skill stage based on proficiency
        old_stage = skill.stage
        if skill.proficiency >= self.autonomous_threshold:
            skill.stage = SkillStage.AUTONOMOUS
        elif skill.proficiency >= self.associative_threshold:
            skill.stage = SkillStage.ASSOCIATIVE
        else:
            skill.stage = SkillStage.COGNITIVE

        skill.last_practiced = datetime.now().isoformat()

        # Record errors in history
        if execution_trace.errors:
            skill.error_history.extend(execution_trace.errors)

        result = {
            "skill_id": skill_id,
            "practice_count": skill.practice_count,
            "proficiency": skill.proficiency,
            "proficiency_delta": skill.proficiency - old_proficiency,
            "stage": skill.stage.value,
            "stage_changed": skill.stage != old_stage,
            "success_rate": success_rate,
            "duration_ms": duration_ms,
            "errors": error_count
        }

        logger.info(f"Practiced skill {skill.name}: proficiency={skill.proficiency:.3f}, stage={skill.stage.value}")
        return result

    def decay_skills(self, days_elapsed: int = 1) -> List[Dict[str, Any]]:
        """
        Apply time-based decay to skill proficiency.

        Skills that aren't practiced will gradually decay,
        simulating the "use it or lose it" principle.

        Args:
            days_elapsed: Number of days since last decay

        Returns:
            List of skills that were affected
        """
        affected = []

        for skill_id, skill in self.skills.items():
            # Calculate days since last practice
            last_practiced = datetime.fromisoformat(skill.last_practiced)
            days_since = (datetime.now() - last_practiced).days

            if days_since > 0:
                # Apply exponential decay
                # NOTE(review): the exponent multiplies days_elapsed by
                # days_since, so repeated calls compound decay for the same
                # idle period -- confirm this double-counting is intended.
                decay_factor = (1 - skill.decay_rate) ** (days_elapsed * days_since)
                old_proficiency = skill.proficiency
                skill.proficiency *= decay_factor

                # Update stage if proficiency dropped; a skill still at or
                # above the autonomous threshold keeps its current stage
                if skill.proficiency < self.associative_threshold:
                    skill.stage = SkillStage.COGNITIVE
                elif skill.proficiency < self.autonomous_threshold:
                    skill.stage = SkillStage.ASSOCIATIVE

                affected.append({
                    "skill_id": skill_id,
                    "name": skill.name,
                    "old_proficiency": old_proficiency,
                    "new_proficiency": skill.proficiency,
                    "stage": skill.stage.value,
                    "days_since_practice": days_since
                })

        logger.info(f"Applied decay to {len(affected)} skills")
        return affected

    def get_skill(self, skill_id: str) -> Optional[Skill]:
        """Get a skill by ID, or None if unknown."""
        return self.skills.get(skill_id)

    def list_skills(
        self,
        domain: Optional[str] = None,
        stage: Optional[SkillStage] = None,
        min_proficiency: float = 0.0
    ) -> List[Skill]:
        """
        List skills with optional filtering.

        Args:
            domain: Filter by domain (None or "" disables the filter,
                since the check is truthiness-based)
            stage: Filter by skill stage
            min_proficiency: Minimum proficiency threshold

        Returns:
            List of matching skills, sorted by proficiency (highest first)
        """
        results = []

        for skill in self.skills.values():
            if domain and skill.domain != domain:
                continue
            if stage and skill.stage != stage:
                continue
            if skill.proficiency < min_proficiency:
                continue
            results.append(skill)

        return sorted(results, key=lambda s: s.proficiency, reverse=True)

    def get_skill_stats(self) -> Dict[str, Any]:
        """Get aggregate statistics about acquired skills.

        Returns a summary dict; when no skills exist it contains only
        ``{"total_skills": 0}``.
        """
        if not self.skills:
            return {"total_skills": 0}

        stages = defaultdict(int)
        domains = defaultdict(int)
        proficiencies = []

        for skill in self.skills.values():
            stages[skill.stage.value] += 1
            domains[skill.domain] += 1
            proficiencies.append(skill.proficiency)

        return {
            "total_skills": len(self.skills),
            "by_stage": dict(stages),
            "by_domain": dict(domains),
            "avg_proficiency": statistics.mean(proficiencies) if proficiencies else 0,
            # 3,600,000 ms per hour
            "total_practice_time_hours": sum(s.total_practice_time_ms for s in self.skills.values()) / 3600000,
            "total_steps_defined": len(self.steps_library)
        }


# =============================================================================
# CHUNKING ENGINE
# =============================================================================

class ChunkingEngine:
    """
    Engine for combining procedure steps into chunks.

    Chunking is a cognitive process that groups individual steps
    into larger, more efficient units that can be executed as one.

    Tracks pairwise co-occurrence of adjacent steps across executions
    and suggests candidate chunks from frequently repeated sequences.
    """

    def __init__(self, skill_acquisition: SkillAcquisition):
        """
        Initialize the chunking engine.

        Args:
            skill_acquisition: The skill acquisition engine (provides the
                shared steps_library and skills registry)
        """
        self.skill_acquisition = skill_acquisition
        self.chunks: Dict[str, Chunk] = {}

        # Chunking parameters
        self.min_chunk_size = 2           # Minimum steps to form a chunk
        self.max_chunk_size = 7           # Miller's magic number +/- 2
        self.co_occurrence_threshold = 3  # Times steps must co-occur

        # Track step co-occurrences for automatic chunking:
        # matrix[step_a][step_b] = number of times step_b directly followed step_a
        self.co_occurrence_matrix: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))

        logger.info("ChunkingEngine initialized")

    def create_chunk(
        self,
        name: str,
        chunk_type: ChunkType,
        step_ids: List[str],
        condition: Optional[str] = None,
        loop_count: Optional[int] = None
    ) -> Chunk:
        """
        Create a new chunk from steps.

        Args:
            name: Chunk name
            chunk_type: Type of chunk (sequence, parallel, etc.)
            step_ids: IDs of steps to include (must exist in steps_library)
            condition: Condition for conditional chunks
            loop_count: Number of iterations for loop chunks

        Returns:
            The created Chunk

        Raises:
            ValueError: If a step ID is unknown or fewer than
                min_chunk_size steps are given.
        """
        # Validate step IDs
        for step_id in step_ids:
            if step_id not in self.skill_acquisition.steps_library:
                raise ValueError(f"Unknown step: {step_id}")

        if len(step_ids) < self.min_chunk_size:
            raise ValueError(f"Chunk must have at least {self.min_chunk_size} steps")

        if len(step_ids) > self.max_chunk_size:
            logger.warning(f"Chunk exceeds recommended size ({self.max_chunk_size})")

        chunk_id = f"chunk_{uuid.uuid4().hex[:12]}"

        chunk = Chunk(
            chunk_id=chunk_id,
            name=name,
            chunk_type=chunk_type,
            component_steps=step_ids,
            condition=condition,
            loop_count=loop_count
        )

        # Calculate average duration from component steps
        total_duration = sum(
            self.skill_acquisition.steps_library[sid].expected_duration_ms
            for sid in step_ids
        )

        # Chunks are typically faster than sum of parts due to reduced overhead
        chunk.average_duration_ms = int(total_duration * 0.7)  # 30% efficiency gain

        self.chunks[chunk_id] = chunk
        logger.info(f"Created chunk: {name} ({chunk_id}) with {len(step_ids)} steps")
        return chunk

    def create_composite_chunk(
        self,
        name: str,
        chunk_ids: List[str]
    ) -> Chunk:
        """
        Create a composite chunk from other chunks.

        Args:
            name: Chunk name
            chunk_ids: IDs of chunks to combine (must already exist)

        Returns:
            The created composite Chunk

        Raises:
            ValueError: If any chunk ID is unknown.
        """
        # Validate chunk IDs
        for chunk_id in chunk_ids:
            if chunk_id not in self.chunks:
                raise ValueError(f"Unknown chunk: {chunk_id}")

        composite_id = f"chunk_{uuid.uuid4().hex[:12]}"

        chunk = Chunk(
            chunk_id=composite_id,
            name=name,
            chunk_type=ChunkType.COMPOSITE,
            sub_chunks=chunk_ids
        )

        # Calculate duration from sub-chunks
        total_duration = sum(
            self.chunks[cid].average_duration_ms
            for cid in chunk_ids
        )
        chunk.average_duration_ms = int(total_duration * 0.85)  # Less efficiency gain for composites

        self.chunks[composite_id] = chunk
        logger.info(f"Created composite chunk: {name} ({composite_id}) from {len(chunk_ids)} chunks")
        return chunk

    def record_execution_sequence(self, step_ids: List[str]):
        """
        Record a sequence of step executions for co-occurrence analysis.

        Only adjacent pairs are counted; this helps identify steps that
        frequently occur together and are candidates for automatic chunking.

        Args:
            step_ids: Sequence of executed step IDs
        """
        for i in range(len(step_ids) - 1):
            current = step_ids[i]
            next_step = step_ids[i + 1]
            self.co_occurrence_matrix[current][next_step] += 1

        logger.debug(f"Recorded execution sequence of {len(step_ids)} steps")

    def suggest_chunks(self, min_co_occurrences: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Suggest potential chunks based on co-occurrence analysis.

        Args:
            min_co_occurrences: Minimum co-occurrence count (uses the
                engine default when None)

        Returns:
            List of suggested chunk configurations; each entry's
            'co_occurrence_score' is the count of its seed pair.
        """
        # Fix: test against None explicitly -- the previous
        # `min_co_occurrences or default` silently discarded an explicit 0.
        threshold = self.co_occurrence_threshold if min_co_occurrences is None else min_co_occurrences
        suggestions = []

        # Find frequently co-occurring step pairs
        pairs = []
        for step1, neighbors in self.co_occurrence_matrix.items():
            for step2, count in neighbors.items():
                if count >= threshold:
                    pairs.append((step1, step2, count))

        # Group connected pairs into potential chunks
        # Simple greedy approach - could be improved with graph clustering
        used_steps = set()

        for step1, step2, pair_count in sorted(pairs, key=lambda x: x[2], reverse=True):
            if step1 in used_steps or step2 in used_steps:
                continue

            # Try to extend the chunk
            chunk_steps = [step1, step2]
            used_steps.add(step1)
            used_steps.add(step2)

            # Look for additional steps that frequently follow
            current = step2
            while len(chunk_steps) < self.max_chunk_size:
                neighbors = self.co_occurrence_matrix.get(current, {})
                best_next = None
                best_count = threshold

                for next_step, c in neighbors.items():
                    if next_step not in used_steps and c >= best_count:
                        best_next = next_step
                        best_count = c

                if best_next:
                    chunk_steps.append(best_next)
                    used_steps.add(best_next)
                    current = best_next
                else:
                    break

            if len(chunk_steps) >= self.min_chunk_size:
                step_names = [
                    self.skill_acquisition.steps_library[sid].name
                    for sid in chunk_steps
                    if sid in self.skill_acquisition.steps_library
                ]

                suggestions.append({
                    "step_ids": chunk_steps,
                    "step_names": step_names,
                    "co_occurrence_score": pair_count,  # score of the seed pair
                    "suggested_name": f"chunk_{step_names[0]}_{step_names[-1]}"
                })

        logger.info(f"Generated {len(suggestions)} chunk suggestions")
        return suggestions

    def apply_chunk_to_skill(self, skill_id: str, chunk_id: str) -> bool:
        """
        Apply a chunk to a skill, registering the chunk on the skill.

        The skill must already contain every component step of the chunk.

        Args:
            skill_id: ID of the skill
            chunk_id: ID of the chunk to apply

        Returns:
            True if the chunk was newly applied; False if the skill lacks
            the chunk's steps or already has the chunk.

        Raises:
            ValueError: If the skill or chunk ID is unknown.
        """
        if skill_id not in self.skill_acquisition.skills:
            raise ValueError(f"Unknown skill: {skill_id}")
        if chunk_id not in self.chunks:
            raise ValueError(f"Unknown chunk: {chunk_id}")

        skill = self.skill_acquisition.skills[skill_id]
        chunk = self.chunks[chunk_id]

        # Check if skill contains all chunk steps
        skill_step_ids = [s.step_id for s in skill.steps]
        if not all(sid in skill_step_ids for sid in chunk.component_steps):
            logger.warning(f"Skill {skill_id} doesn't contain all chunk steps")
            return False

        # Add chunk to skill
        if chunk_id not in skill.chunks:
            skill.chunks.append(chunk_id)
            logger.info(f"Applied chunk {chunk.name} to skill {skill.name}")
            return True

        return False

    def get_chunk(self, chunk_id: str) -> Optional[Chunk]:
        """Get a chunk by ID, or None if unknown."""
        return self.chunks.get(chunk_id)

    def list_chunks(self, chunk_type: Optional[ChunkType] = None) -> List[Chunk]:
        """List all chunks, optionally filtered by type."""
        if chunk_type:
            return [c for c in self.chunks.values() if c.chunk_type == chunk_type]
        return list(self.chunks.values())


# =============================================================================
# AUTOMATIZATION TRACKER
# =============================================================================

class AutomatizationTracker:
    """
    Tracks the automatization of skills as they progress from
    conscious effort to automatic execution.

    Measures cognitive load, reaction time, and attention requirements
    to determine automatization level.
    """

    def __init__(self, skill_acquisition: SkillAcquisition):
        """
        Initialize the automatization tracker.

        Args:
            skill_acquisition: The skill acquisition engine
        """
        self.skill_acquisition = skill_acquisition

        # Thresholds used to decide whether a skill counts as automatic
        self.reaction_time_threshold_ms = 500  # Fast enough to be automatic
        self.attention_threshold = 0.3         # Low enough to run in background
        self.dual_task_decrement_threshold = 0.1  # Acceptable performance drop

        # Per-skill execution metrics collected via record_execution()
        self.reaction_times: Dict[str, List[int]] = defaultdict(list)  # skill_id -> times in ms
        self.attention_loads: Dict[str, List[float]] = defaultdict(list)  # skill_id -> loads
        self.dual_task_performance: Dict[str, List[Dict[str, Any]]] = defaultdict(list)

        logger.info("AutomatizationTracker initialized")

    def record_execution(
        self,
        skill_id: str,
        reaction_time_ms: int,
        attention_load: float,
        dual_task_data: Optional[Dict[str, Any]] = None
    ):
        """
        Record execution metrics for automatization analysis.

        Args:
            skill_id: ID of the executed skill
            reaction_time_ms: Time from trigger to first action
            attention_load: Estimated attention required (0.0-1.0)
            dual_task_data: Optional data from dual-task testing
        """
        self.reaction_times[skill_id].append(reaction_time_ms)
        self.attention_loads[skill_id].append(attention_load)

        if dual_task_data:
            self.dual_task_performance[skill_id].append(dual_task_data)

        # Bound the per-skill history to the most recent entries
        # (dual-task records are intentionally left unbounded, as before)
        history_limit = 100
        for series in (self.reaction_times, self.attention_loads):
            if len(series[skill_id]) > history_limit:
                series[skill_id] = series[skill_id][-history_limit:]

        logger.debug(f"Recorded execution for {skill_id}: RT={reaction_time_ms}ms, attention={attention_load}")

    def calculate_automatization_index(self, skill_id: str) -> Dict[str, Any]:
        """
        Calculate a composite automatization index for a skill.

        The index combines multiple measures of automaticity:
        - Reaction time (faster = more automatic)
        - Attention load (lower = more automatic)
        - Dual-task performance (less decrement = more automatic)
        - Variability (lower = more automatic)

        Args:
            skill_id: ID of the skill

        Returns:
            Dictionary with automatization metrics; when no execution data
            has been recorded, returns an index of 0.0 with empty components.

        Raises:
            ValueError: If skill_id is not registered in the acquisition engine.
        """
        if skill_id not in self.skill_acquisition.skills:
            raise ValueError(f"Unknown skill: {skill_id}")

        skill = self.skill_acquisition.skills[skill_id]

        # Default values if no data
        # (indexing the defaultdict creates an empty entry for skill_id as
        # a side effect, which get_automatization_report later iterates)
        if not self.reaction_times[skill_id]:
            return {
                "skill_id": skill_id,
                "skill_name": skill.name,
                "automatization_index": 0.0,
                "is_automatic": False,
                "data_points": 0,
                "components": {}
            }

        rts = self.reaction_times[skill_id]
        loads = self.attention_loads[skill_id]

        # Calculate component scores (0.0 to 1.0, higher = more automatic)
        avg_rt = statistics.mean(rts)
        rt_score = max(0, 1 - (avg_rt / 2000))  # Normalize to 2000ms max

        avg_load = statistics.mean(loads) if loads else 1.0
        load_score = 1 - avg_load

        # Variability score (lower std dev = more automatic)
        # With a single sample, stdev is undefined; avg_rt is used as a
        # conservative stand-in, which yields variability_score == 0
        rt_std = statistics.stdev(rts) if len(rts) > 1 else avg_rt
        variability_score = max(0, 1 - (rt_std / avg_rt)) if avg_rt > 0 else 0

        # Dual-task score: neutral 0.5 when no dual-task data exists
        dual_score = 0.5  # Default
        if self.dual_task_performance[skill_id]:
            decrements = [d.get("performance_decrement", 0) for d in self.dual_task_performance[skill_id]]
            avg_decrement = statistics.mean(decrements)
            dual_score = max(0, 1 - (avg_decrement / 0.5))  # 50% decrement is fully non-automatic

        # Composite index (weighted average; weights sum to 1.0)
        automatization_index = (
            0.3 * rt_score +
            0.3 * load_score +
            0.2 * variability_score +
            0.2 * dual_score
        )

        # A skill is "automatic" only if it clears the RT and attention
        # thresholds AND the composite index is high
        is_automatic = (
            avg_rt < self.reaction_time_threshold_ms and
            avg_load < self.attention_threshold and
            automatization_index > 0.7
        )

        return {
            "skill_id": skill_id,
            "skill_name": skill.name,
            "automatization_index": automatization_index,
            "is_automatic": is_automatic,
            "data_points": len(rts),
            "components": {
                "reaction_time": {
                    "average_ms": avg_rt,
                    "score": rt_score
                },
                "attention_load": {
                    "average": avg_load,
                    "score": load_score
                },
                "variability": {
                    "coefficient": rt_std / avg_rt if avg_rt > 0 else 0,
                    "score": variability_score
                },
                "dual_task": {
                    "data_points": len(self.dual_task_performance[skill_id]),
                    "score": dual_score
                }
            }
        }

    def get_automatization_report(self) -> Dict[str, Any]:
        """
        Build a summary of automatization status across every tracked skill.

        Returns:
            Report dict containing the generation timestamp, the number of
            tracked skills, how many are fully automatic, and the per-skill
            metrics sorted by automatization index (most automatic first).
        """
        tracked_count = len(self.reaction_times)
        skill_metrics: List[Dict[str, Any]] = []
        automatic_total = 0

        for tracked_id in self.reaction_times.keys():
            try:
                entry = self.calculate_automatization_index(tracked_id)
            except ValueError:
                # Skill is no longer known to the acquisition engine; skip it.
                continue
            skill_metrics.append(entry)
            if entry["is_automatic"]:
                automatic_total += 1

        # Most automatic skills first.
        skill_metrics.sort(key=lambda m: m["automatization_index"], reverse=True)

        return {
            "timestamp": datetime.now().isoformat(),
            "skills_tracked": tracked_count,
            "automatic_skills": automatic_total,
            "skills": skill_metrics,
        }

    def suggest_practice_targets(self, top_n: int = 5) -> List[Dict[str, Any]]:
        """
        Suggest skills that would benefit most from additional practice.

        Picks out skills that are not yet automatic but have an
        automatization index above 0.4 (i.e. close to the threshold),
        identifies each one's weakest component, and pairs it with a
        concrete practice recommendation.

        Args:
            top_n: Number of suggestions to return

        Returns:
            Up to top_n skills, ordered by current automatization index
            (closest to automatic first)
        """
        candidates: List[Dict[str, Any]] = []

        for sid in self.skill_acquisition.skills:
            metrics = self.calculate_automatization_index(sid)

            # Only skills that are "almost there": not automatic yet, but
            # past the 0.4 mark.
            if metrics["is_automatic"] or metrics["automatization_index"] <= 0.4:
                continue

            # The component with the lowest score is the practice focus.
            comp_name, comp_data = min(
                metrics["components"].items(),
                key=lambda item: item[1].get("score", 1),
            )

            candidates.append({
                "skill_id": sid,
                "skill_name": metrics["skill_name"],
                "current_index": metrics["automatization_index"],
                "target_component": comp_name,
                "component_score": comp_data.get("score", 0),
                "recommendation": self._get_practice_recommendation(comp_name),
            })

        candidates.sort(key=lambda c: c["current_index"], reverse=True)
        return candidates[:top_n]

    def _get_practice_recommendation(self, component: str) -> str:
        """Get practice recommendation for a component."""
        recommendations = {
            "reaction_time": "Practice with time pressure to reduce reaction time",
            "attention_load": "Practice while performing secondary tasks",
            "variability": "Focus on consistent execution timing",
            "dual_task": "Practice skill while performing unrelated tasks"
        }
        return recommendations.get(component, "Continue regular practice")


# =============================================================================
# MOTOR PROGRAM EXECUTOR
# =============================================================================

class MotorProgram:
    """
    Executes action sequences (motor programs) for procedural skills.

    Handles the actual execution of steps, managing timing,
    sequencing, and coordination of actions.

    A single instance runs at most one skill at a time: ``execute_skill``
    raises if a run is already in progress (tracked via ``is_executing``).
    The class is not synchronized, so do not share an instance across
    threads without external locking.
    """

    def __init__(
        self,
        skill_acquisition: SkillAcquisition,
        chunking_engine: ChunkingEngine,
        action_executor: Optional[Callable[[ProcedureStep], Tuple[bool, Any]]] = None
    ):
        """
        Initialize the motor program executor.

        Args:
            skill_acquisition: The skill acquisition engine
            chunking_engine: The chunking engine
            action_executor: Optional callable to execute individual steps.
                Receives a ProcedureStep and returns a (success, result)
                tuple. Defaults to a simulator that sleeps for the step's
                expected duration and always succeeds.
        """
        self.skill_acquisition = skill_acquisition
        self.chunking_engine = chunking_engine
        self.action_executor = action_executor or self._default_executor

        # Execution state
        self.current_trace: Optional[ExecutionTrace] = None  # active or most recent trace
        self.is_executing: bool = False  # re-entrancy guard for execute_skill

        # Timing parameters
        self.inter_step_delay_ms = 50  # Minimum delay between steps

        logger.info("MotorProgram executor initialized")

    def _default_executor(self, step: ProcedureStep) -> Tuple[bool, Any]:
        """Default step executor - simulates execution.

        Blocks for the step's expected duration, then reports success.
        """
        time.sleep(step.expected_duration_ms / 1000)
        return (True, {"simulated": True})

    def execute_skill(
        self,
        skill_id: str,
        context: Optional[Dict[str, Any]] = None,
        use_chunks: bool = True
    ) -> ExecutionTrace:
        """
        Execute a skill's action sequence.

        The trace is always finalized (end_time set, is_executing cleared)
        even when execution fails internally; failures are captured in the
        trace's errors list rather than propagated, and the trace is still
        fed to the chunking and skill-acquisition engines.

        Args:
            skill_id: ID of the skill to execute
            context: Execution context (parameters, state, etc.)
            use_chunks: Whether to use chunked execution

        Returns:
            ExecutionTrace with execution details

        Raises:
            ValueError: If the skill is unknown
            RuntimeError: If another execution is already in progress
        """
        if skill_id not in self.skill_acquisition.skills:
            raise ValueError(f"Unknown skill: {skill_id}")

        # Re-entrancy guard: one run at a time per instance.
        if self.is_executing:
            raise RuntimeError("Execution already in progress")

        skill = self.skill_acquisition.skills[skill_id]
        self.is_executing = True

        # Create execution trace
        trace_id = f"trace_{uuid.uuid4().hex[:12]}"
        self.current_trace = ExecutionTrace(
            trace_id=trace_id,
            skill_id=skill_id,
            start_time=datetime.now().isoformat(),
            context=context or {}
        )

        try:
            # Prefer chunked execution when the skill has chunks and the
            # caller allows it; otherwise run the raw step sequence.
            if use_chunks and skill.chunks:
                self._execute_with_chunks(skill)
            else:
                self._execute_sequential(skill)

            # Success means no errors were recorded during the run.
            self.current_trace.success = len(self.current_trace.errors) == 0

        except Exception as e:
            # Convert unexpected failures into a trace error entry instead
            # of letting them escape; callers inspect the returned trace.
            self.current_trace.errors.append({
                "type": "execution_error",
                "message": str(e),
                "timestamp": datetime.now().isoformat()
            })
            self.current_trace.success = False
            logger.error(f"Skill execution failed: {e}")

        finally:
            self.current_trace.end_time = datetime.now().isoformat()
            self.is_executing = False

        # Record for chunking analysis (note: runs even for failed
        # executions, using whatever steps did execute).
        step_ids = [s["step_id"] for s in self.current_trace.steps_executed]
        self.chunking_engine.record_execution_sequence(step_ids)

        # Update skill with practice
        self.skill_acquisition.practice_skill(skill_id, self.current_trace)

        return self.current_trace

    def _execute_sequential(self, skill: Skill):
        """Execute skill steps sequentially, pausing between steps."""
        for step in skill.steps:
            self._execute_step(step)
            # Minimum inter-step delay (applied after every step, including
            # the last one).
            time.sleep(self.inter_step_delay_ms / 1000)

    def _execute_with_chunks(self, skill: Skill):
        """Execute skill using chunks where possible.

        Chunks run first (in the order listed on the skill), then any steps
        not covered by a chunk run in their original order.
        NOTE(review): this can reorder steps relative to the plain sequence
        when a chunk covers non-leading steps — confirm that chunk
        membership respects step dependencies.
        """
        executed_steps = set()

        # First, execute chunks
        for chunk_id in skill.chunks:
            chunk = self.chunking_engine.get_chunk(chunk_id)
            if chunk:
                self._execute_chunk(chunk)
                executed_steps.update(chunk.component_steps)

        # Then execute remaining steps
        for step in skill.steps:
            if step.step_id not in executed_steps:
                self._execute_step(step)
                time.sleep(self.inter_step_delay_ms / 1000)

    def _execute_chunk(self, chunk: Chunk):
        """Execute a chunk according to its type.

        SEQUENCE and PARALLEL both run component steps one after another
        (parallelism is only simulated); CONDITIONAL runs the steps only if
        the chunk's condition evaluates true; LOOP repeats the steps
        loop_count times (default 1); COMPOSITE recurses into sub-chunks.
        Step IDs missing from the steps library are silently skipped.
        """
        logger.debug(f"Executing chunk: {chunk.name}")

        if chunk.chunk_type == ChunkType.SEQUENCE:
            for step_id in chunk.component_steps:
                step = self.skill_acquisition.steps_library.get(step_id)
                if step:
                    self._execute_step(step)

        elif chunk.chunk_type == ChunkType.PARALLEL:
            # Simulate parallel execution (steps actually run sequentially)
            for step_id in chunk.component_steps:
                step = self.skill_acquisition.steps_library.get(step_id)
                if step:
                    self._execute_step(step)

        elif chunk.chunk_type == ChunkType.CONDITIONAL:
            # Falsy/absent condition means the chunk's steps are skipped.
            if chunk.condition and self._evaluate_condition(chunk.condition):
                for step_id in chunk.component_steps:
                    step = self.skill_acquisition.steps_library.get(step_id)
                    if step:
                        self._execute_step(step)

        elif chunk.chunk_type == ChunkType.LOOP:
            for _ in range(chunk.loop_count or 1):
                for step_id in chunk.component_steps:
                    step = self.skill_acquisition.steps_library.get(step_id)
                    if step:
                        self._execute_step(step)

        elif chunk.chunk_type == ChunkType.COMPOSITE:
            for sub_chunk_id in chunk.sub_chunks:
                sub_chunk = self.chunking_engine.get_chunk(sub_chunk_id)
                if sub_chunk:
                    self._execute_chunk(sub_chunk)

        chunk.execution_count += 1

    def _execute_step(self, step: ProcedureStep) -> bool:
        """
        Execute a single step.

        Checks preconditions, runs the configured action executor, records
        the outcome in the current trace, and maintains the step's running
        error rate as an incremental mean over executions.

        Args:
            step: The step to execute

        Returns:
            True if successful; False on failure or exception
        """
        step_start = datetime.now()

        try:
            # Check preconditions
            for precond in step.preconditions:
                if not self._evaluate_condition(precond):
                    raise RuntimeError(f"Precondition failed: {precond}")

            # Execute the action
            success, result = self.action_executor(step)

            step.execution_count += 1

            # Record in trace
            self.current_trace.steps_executed.append({
                "step_id": step.step_id,
                "name": step.name,
                "start_time": step_start.isoformat(),
                "end_time": datetime.now().isoformat(),
                "success": success,
                "result": result
            })

            if not success:
                # Fold a 1 (failure) into the running failure-rate mean
                # over execution_count runs.
                step.error_rate = (step.error_rate * (step.execution_count - 1) + 1) / step.execution_count
                self.current_trace.errors.append({
                    "step_id": step.step_id,
                    "type": "step_failure",
                    "timestamp": datetime.now().isoformat()
                })
            else:
                # Fold a 0 (success) into the running failure-rate mean.
                step.error_rate = step.error_rate * (step.execution_count - 1) / step.execution_count

            return success

        except Exception as e:
            # NOTE(review): on exception the step is neither appended to
            # steps_executed nor counted in execution_count/error_rate —
            # confirm this asymmetry with ordinary step failures is intended.
            self.current_trace.errors.append({
                "step_id": step.step_id,
                "type": "exception",
                "message": str(e),
                "timestamp": datetime.now().isoformat()
            })
            return False

    def _evaluate_condition(self, condition: str) -> bool:
        """Evaluate a condition string (placeholder implementation)."""
        # In a real implementation, this would evaluate conditions
        # against the current context
        return True

    def get_current_trace(self) -> Optional[ExecutionTrace]:
        """Get the current or most recent execution trace."""
        return self.current_trace


# =============================================================================
# ERROR CORRECTION ENGINE
# =============================================================================

class ErrorCorrection:
    """
    Learns from procedural errors and develops correction strategies.

    Analyzes error patterns, identifies root causes, and generates
    corrective procedures to prevent future errors.

    A "pattern" is a (skill, step, category) combination that has occurred
    at least ``pattern_threshold`` times.
    """

    def __init__(self, skill_acquisition: SkillAcquisition):
        """
        Initialize the error correction engine.

        Args:
            skill_acquisition: The skill acquisition engine
        """
        self.skill_acquisition = skill_acquisition
        # All recorded errors, in the order they were reported.
        self.error_records: List[ErrorRecord] = []
        # pattern_id -> pattern metadata (built by _update_patterns).
        self.error_patterns: Dict[str, Dict[str, Any]] = {}
        self.corrective_procedures: Dict[str, List[str]] = {}  # error_pattern -> corrective steps

        # Analysis parameters
        self.pattern_threshold = 3  # Minimum occurrences to identify a pattern

        logger.info("ErrorCorrection engine initialized")

    def record_error(
        self,
        skill_id: str,
        step_id: str,
        category: ErrorCategory,
        description: str,
        context: Dict[str, Any]
    ) -> ErrorRecord:
        """
        Record an error occurrence.

        Also mirrors the error into the owning skill's error history (when
        the skill exists) and re-runs pattern detection.

        Args:
            skill_id: ID of the skill where error occurred
            step_id: ID of the step where error occurred
            category: Error category
            description: Error description
            context: Context at time of error

        Returns:
            The created ErrorRecord
        """
        error_id = f"error_{uuid.uuid4().hex[:12]}"

        record = ErrorRecord(
            error_id=error_id,
            skill_id=skill_id,
            step_id=step_id,
            category=category,
            description=description,
            context=context,
            timestamp=datetime.now().isoformat()
        )

        self.error_records.append(record)

        # Update skill error history
        if skill_id in self.skill_acquisition.skills:
            self.skill_acquisition.skills[skill_id].error_history.append(record.to_dict())

        # Trigger pattern analysis
        self._update_patterns()

        logger.info(f"Recorded {category.value} error in skill {skill_id}, step {step_id}")
        return record

    def _update_patterns(self):
        """Re-scan all error records and update recurring error patterns.

        Counts errors per (skill, step) location and per category, then
        promotes any combination at or above ``pattern_threshold`` to a
        pattern (or refreshes an existing pattern's occurrence count).
        """
        # Group errors by (skill, step) location, then tally per category.
        pattern_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))

        for error in self.error_records:
            key = f"{error.skill_id}:{error.step_id}"
            pattern_counts[key][error.category.value] += 1

        # Identify patterns above threshold
        for key, categories in pattern_counts.items():
            for category, count in categories.items():
                if count < self.pattern_threshold:
                    continue

                pattern_id = f"pattern_{key}_{category}"

                if pattern_id in self.error_patterns:
                    # Known pattern: refresh its occurrence count only,
                    # preserving first_detected.
                    self.error_patterns[pattern_id]["occurrence_count"] = count
                else:
                    # maxsplit=1 keeps the unpack safe if a step_id ever
                    # contains ':' (assumes skill ids never do).
                    skill_id, step_id = key.split(":", 1)
                    self.error_patterns[pattern_id] = {
                        "pattern_id": pattern_id,
                        "skill_id": skill_id,
                        "step_id": step_id,
                        "category": category,
                        "occurrence_count": count,
                        "first_detected": datetime.now().isoformat(),
                        "corrective_procedure": None
                    }
                    logger.info(f"Identified new error pattern: {pattern_id}")

    def analyze_error_causes(self, skill_id: str) -> Dict[str, Any]:
        """
        Analyze root causes of errors in a skill.

        Args:
            skill_id: ID of the skill to analyze

        Returns:
            Analysis report with error breakdowns by category and step,
            the most error-prone step, a 7-day temporal trend, and a
            root-cause hypothesis
        """
        skill_errors = [e for e in self.error_records if e.skill_id == skill_id]

        if not skill_errors:
            return {
                "skill_id": skill_id,
                "total_errors": 0,
                "analysis": "No errors recorded"
            }

        # Categorize errors
        by_category: Dict[str, List[ErrorRecord]] = defaultdict(list)
        by_step: Dict[str, List[ErrorRecord]] = defaultdict(list)

        for error in skill_errors:
            by_category[error.category.value].append(error)
            by_step[error.step_id].append(error)

        # Find most problematic step
        most_errors_step = max(by_step.items(), key=lambda x: len(x[1]))[0]
        step = self.skill_acquisition.steps_library.get(most_errors_step)

        # Identify temporal patterns; use a single reference time for the
        # whole scan instead of calling datetime.now() per record.
        now = datetime.now()
        error_times = [datetime.fromisoformat(e.timestamp) for e in skill_errors]
        recent_count = sum(1 for t in error_times if (now - t).days < 7)

        return {
            "skill_id": skill_id,
            "total_errors": len(skill_errors),
            "by_category": {k: len(v) for k, v in by_category.items()},
            "by_step": {k: len(v) for k, v in by_step.items()},
            "most_problematic_step": {
                "step_id": most_errors_step,
                "step_name": step.name if step else "Unknown",
                "error_count": len(by_step[most_errors_step])
            },
            "temporal_trend": {
                "recent_7_days": recent_count,
                "older": len(skill_errors) - recent_count
            },
            "root_cause_hypothesis": self._generate_hypothesis(by_category)
        }

    def _generate_hypothesis(self, by_category: Dict[str, List[ErrorRecord]]) -> str:
        """Generate root cause hypothesis based on the dominant error category."""
        if not by_category:
            return "Insufficient data for hypothesis"

        # The category with the most errors drives the hypothesis.
        dominant = max(by_category.items(), key=lambda x: len(x[1]))[0]

        hypotheses = {
            "omission": "Steps may be too complex or poorly defined",
            "commission": "Similar actions may be confusing",
            "sequence": "Step order dependencies may be unclear",
            "timing": "Timing constraints may be too strict",
            "precision": "Step parameters may need refinement",
            "context": "Context recognition may need improvement"
        }

        return hypotheses.get(dominant, "Unknown cause category")

    def generate_corrective_procedure(
        self,
        pattern_id: str
    ) -> List[Dict[str, Any]]:
        """
        Generate a corrective procedure for an error pattern.

        The generated steps are stored on the pattern (and in
        corrective_procedures) as a side effect. An unrecognized category,
        or a pattern whose step is missing from the steps library, yields
        an empty list.

        Args:
            pattern_id: ID of the error pattern

        Returns:
            List of corrective steps

        Raises:
            ValueError: If the pattern is unknown
        """
        if pattern_id not in self.error_patterns:
            raise ValueError(f"Unknown pattern: {pattern_id}")

        pattern = self.error_patterns[pattern_id]
        category = pattern["category"]
        step_id = pattern["step_id"]

        step = self.skill_acquisition.steps_library.get(step_id)
        if not step:
            return []

        # Generate category-specific corrections
        corrections: List[Dict[str, Any]] = []

        if category == "omission":
            corrections = [
                {"action": "add_reminder", "target": step_id, "timing": "before"},
                {"action": "add_checklist_item", "step_name": step.name}
            ]

        elif category == "commission":
            corrections = [
                {"action": "add_verification", "target": step_id},
                {"action": "clarify_conditions", "preconditions": step.preconditions}
            ]

        elif category == "sequence":
            corrections = [
                {"action": "add_dependency_marker", "target": step_id},
                {"action": "enforce_ordering", "strict": True}
            ]

        elif category == "timing":
            corrections = [
                {"action": "adjust_timing", "target": step_id, "buffer_ms": 200},
                {"action": "add_timing_feedback", "visual": True}
            ]

        elif category == "precision":
            corrections = [
                {"action": "refine_parameters", "target": step_id},
                {"action": "add_tolerance_check", "threshold": 0.1}
            ]

        elif category == "context":
            corrections = [
                {"action": "enhance_context_check", "target": step_id},
                {"action": "add_context_hints", "display": True}
            ]

        self.error_patterns[pattern_id]["corrective_procedure"] = corrections
        self.corrective_procedures[pattern_id] = [str(c) for c in corrections]

        logger.info(f"Generated corrective procedure for pattern {pattern_id}")
        return corrections

    def apply_learning(
        self,
        error_id: str,
        corrective_action: str
    ):
        """
        Mark an error as learned from with a corrective action.

        Args:
            error_id: ID of the error
            corrective_action: Description of corrective action taken
        """
        record = next((r for r in self.error_records if r.error_id == error_id), None)
        if record is None:
            logger.warning(f"Error {error_id} not found")
            return

        record.corrective_action = corrective_action
        record.learned = True
        logger.info(f"Applied learning to error {error_id}")

    def get_error_statistics(self) -> Dict[str, Any]:
        """Get overall error statistics.

        Returns:
            Totals by category and skill, pattern count, and the fraction
            of errors a corrective action was learned for. Returns
            {"total_errors": 0} when nothing has been recorded.
        """
        if not self.error_records:
            return {"total_errors": 0}

        by_category: Dict[str, int] = defaultdict(int)
        by_skill: Dict[str, int] = defaultdict(int)
        learned_count = 0

        for error in self.error_records:
            by_category[error.category.value] += 1
            by_skill[error.skill_id] += 1
            if error.learned:
                learned_count += 1

        total = len(self.error_records)
        return {
            "total_errors": total,
            "by_category": dict(by_category),
            "by_skill": dict(by_skill),
            "patterns_identified": len(self.error_patterns),
            "errors_learned_from": learned_count,
            # total > 0 is guaranteed by the guard above.
            "learning_rate": learned_count / total
        }


# =============================================================================
# TRANSFER LEARNING ENGINE
# =============================================================================

class TransferLearning:
    """
    Applies learned skills to new domains through transfer learning.

    Identifies structural similarities between skills and enables
    adaptation of existing procedural knowledge to new contexts.
    """

    def __init__(self, skill_acquisition: SkillAcquisition):
        """
        Initialize the transfer learning engine.

        Args:
            skill_acquisition: The skill acquisition engine
        """
        self.skill_acquisition = skill_acquisition

        # Transfer parameters: minimum similarity for positive transfer.
        self.similarity_threshold = 0.6

        # History of every transfer performed via transfer_skill().
        self.transfer_attempts: List[Dict[str, Any]] = []
        # Learned pairwise similarities: domain -> domain -> similarity.
        self.domain_mappings: Dict[str, Dict[str, float]] = defaultdict(dict)

        logger.info("TransferLearning engine initialized")

    def calculate_skill_similarity(
        self,
        skill_id_1: str,
        skill_id_2: str
    ) -> Dict[str, Any]:
        """
        Calculate structural similarity between two skills.

        Combines three component metrics — step-count closeness, Jaccard
        overlap of step actions, and Jaccard overlap of step parameter
        names — into a weighted score, boosted by 20% when both skills
        share a domain (clamped to 1.0).

        Args:
            skill_id_1: First skill ID
            skill_id_2: Second skill ID

        Returns:
            Similarity analysis with the overall score, per-component
            scores, and a predicted transfer type

        Raises:
            ValueError: If either skill does not exist
        """
        skill_1 = self.skill_acquisition.get_skill(skill_id_1)
        skill_2 = self.skill_acquisition.get_skill(skill_id_2)

        if not skill_1 or not skill_2:
            raise ValueError("One or both skills not found")

        # Calculate various similarity metrics

        # Step count similarity: 1.0 for equal counts, shrinking with the gap.
        step_count_sim = 1 - abs(len(skill_1.steps) - len(skill_2.steps)) / max(len(skill_1.steps), len(skill_2.steps), 1)

        # Action similarity (Jaccard)
        actions_1 = set(s.action for s in skill_1.steps)
        actions_2 = set(s.action for s in skill_2.steps)
        action_sim = len(actions_1 & actions_2) / len(actions_1 | actions_2) if actions_1 | actions_2 else 0

        # Parameter structure similarity (Jaccard over parameter names)
        params_1 = set()
        params_2 = set()
        for s in skill_1.steps:
            params_1.update(s.parameters.keys())
        for s in skill_2.steps:
            params_2.update(s.parameters.keys())
        param_sim = len(params_1 & params_2) / len(params_1 | params_2) if params_1 | params_2 else 0

        # Domain relationship: same-domain pairs get a 20% boost.
        same_domain = skill_1.domain == skill_2.domain
        domain_factor = 1.2 if same_domain else 1.0

        # Overall similarity: weighted components, boosted for same domain,
        # clamped to 1.0. BUGFIX: the clamp must apply to the product —
        # the previous `* min(domain_factor, 1.0)` always evaluated to 1.0,
        # making the same-domain boost dead code.
        overall = min(
            (
                0.3 * step_count_sim +
                0.4 * action_sim +
                0.3 * param_sim
            ) * domain_factor,
            1.0
        )

        # Determine transfer type
        if overall > self.similarity_threshold:
            transfer_type = TransferType.NEAR if same_domain else TransferType.FAR
            likely_positive = True
        else:
            transfer_type = TransferType.ZERO
            likely_positive = False

        return {
            "skill_1": {"id": skill_id_1, "name": skill_1.name, "domain": skill_1.domain},
            "skill_2": {"id": skill_id_2, "name": skill_2.name, "domain": skill_2.domain},
            "overall_similarity": overall,
            "components": {
                "step_count": step_count_sim,
                "action_overlap": action_sim,
                "parameter_structure": param_sim
            },
            "same_domain": same_domain,
            "predicted_transfer_type": transfer_type.value,
            "likely_positive_transfer": likely_positive
        }

    def find_transferable_skills(
        self,
        target_domain: str,
        min_similarity: Optional[float] = None
    ) -> List[Dict[str, Any]]:
        """
        Find skills that could transfer to a new domain.

        When the target domain already has skills, each candidate is scored
        by its average structural similarity against them; otherwise the
        learned domain-to-domain mapping is used (0.5 when unknown).

        Args:
            target_domain: The domain to transfer to
            min_similarity: Minimum similarity threshold (falls back to the
                engine's similarity_threshold when falsy)

        Returns:
            Candidate skills sorted by transfer readiness, descending
        """
        cutoff = min_similarity or self.similarity_threshold

        all_skills = list(self.skill_acquisition.skills.values())
        sources = [s for s in all_skills if s.domain != target_domain]
        in_target = [s for s in all_skills if s.domain == target_domain]

        candidates: List[Dict[str, Any]] = []

        for source in sources:
            if in_target:
                # Average structural similarity against every skill already
                # in the target domain.
                score = statistics.mean(
                    self.calculate_skill_similarity(source.skill_id, t.skill_id)["overall_similarity"]
                    for t in in_target
                )
            else:
                # No exemplars in the target domain: fall back to the
                # learned domain mapping, defaulting to 0.5.
                score = self.domain_mappings.get(source.domain, {}).get(target_domain, 0.5)

            if score >= cutoff:
                # Readiness blends mastery of the skill with how well it maps.
                candidates.append({
                    "skill_id": source.skill_id,
                    "skill_name": source.name,
                    "source_domain": source.domain,
                    "similarity_score": score,
                    "proficiency": source.proficiency,
                    "transfer_readiness": source.proficiency * score
                })

        candidates.sort(key=lambda c: c["transfer_readiness"], reverse=True)

        logger.info(f"Found {len(candidates)} transferable skills for domain '{target_domain}'")
        return candidates

    def transfer_skill(
        self,
        source_skill_id: str,
        target_domain: str,
        adaptations: Optional[Dict[str, Any]] = None
    ) -> Skill:
        """
        Transfer a skill to a new domain.

        Clones the source skill's steps (raising each step's attention
        demand, since the new domain is unfamiliar), registers the clone
        as a new skill in the target domain at reduced proficiency, and
        records the transfer on both the source skill and the engine.

        Args:
            source_skill_id: ID of the source skill
            target_domain: Target domain for the transfer
            adaptations: Optional per-step attribute overrides, keyed by
                the SOURCE step's step_id

        Returns:
            The new transferred skill

        Raises:
            ValueError: If the source skill does not exist
        """
        source = self.skill_acquisition.get_skill(source_skill_id)
        if source is None:
            raise ValueError(f"Source skill not found: {source_skill_id}")

        # Clone each step with a fresh ID and copied (not shared) containers.
        adapted_steps: List[ProcedureStep] = []
        for original in source.steps:
            clone = ProcedureStep(
                step_id=f"step_{uuid.uuid4().hex[:12]}",
                name=original.name,
                action=original.action,
                parameters=original.parameters.copy(),
                preconditions=original.preconditions.copy(),
                postconditions=original.postconditions.copy(),
                expected_duration_ms=original.expected_duration_ms,
                attention_required=min(original.attention_required * 1.2, 1.0)  # Increase attention for new domain
            )

            # Apply any caller-supplied overrides for this source step.
            if adaptations and original.step_id in adaptations:
                for attr, value in adaptations[original.step_id].items():
                    if hasattr(clone, attr):
                        setattr(clone, attr, value)

            adapted_steps.append(clone)

        # Transferred skills start below the source's proficiency level.
        transfer_penalty = 0.3  # 30% proficiency penalty for transfer
        initial_proficiency = source.proficiency * (1 - transfer_penalty)

        new_skill = self.skill_acquisition.learn_skill(
            name=f"{source.name} ({target_domain})",
            description=f"Transferred from {source.domain}: {source.description}",
            domain=target_domain,
            steps=adapted_steps,
            metadata={
                "transferred_from": source_skill_id,
                "source_domain": source.domain,
                "transfer_date": datetime.now().isoformat()
            }
        )

        new_skill.proficiency = initial_proficiency
        new_skill.stage = SkillStage.ASSOCIATIVE if initial_proficiency > 0.4 else SkillStage.COGNITIVE

        # Record the transfer on the source skill and in the engine log.
        source.transfer_history.append({
            "target_skill_id": new_skill.skill_id,
            "target_domain": target_domain,
            "transfer_date": datetime.now().isoformat()
        })
        self.transfer_attempts.append({
            "source_skill_id": source_skill_id,
            "target_skill_id": new_skill.skill_id,
            "source_domain": source.domain,
            "target_domain": target_domain,
            "initial_proficiency": initial_proficiency,
            "timestamp": datetime.now().isoformat()
        })

        logger.info(f"Transferred skill '{source.name}' to domain '{target_domain}'")
        return new_skill

    def update_domain_mapping(
        self,
        domain_1: str,
        domain_2: str,
        similarity: float
    ):
        """
        Update the similarity mapping between two domains.

        The mapping is stored symmetrically, so a lookup from either
        domain to the other returns the same score.

        Args:
            domain_1: First domain
            domain_2: Second domain
            similarity: Similarity score (0.0 to 1.0)

        Raises:
            ValueError: If similarity is outside the [0.0, 1.0] range.
        """
        # The docstring contract is 0.0-1.0; enforce it so downstream
        # transfer predictions never see an out-of-range score.
        if not 0.0 <= similarity <= 1.0:
            raise ValueError(
                f"similarity must be in [0.0, 1.0], got {similarity}"
            )
        # Write both directions so either domain works as the lookup key.
        self.domain_mappings[domain_1][domain_2] = similarity
        self.domain_mappings[domain_2][domain_1] = similarity
        logger.info(f"Updated domain mapping: {domain_1} <-> {domain_2} = {similarity}")

    def evaluate_transfer_success(
        self,
        transferred_skill_id: str
    ) -> Dict[str, Any]:
        """
        Evaluate how successful a skill transfer was.

        Compares the transferred skill's current proficiency against the
        proficiency it started with at transfer time, and against the
        source skill it was derived from.

        Args:
            transferred_skill_id: ID of the transferred skill

        Returns:
            Evaluation results; ``{"error": ...}`` if no transfer record
            exists for the skill.

        Raises:
            ValueError: If the transferred skill does not exist.
        """
        skill = self.skill_acquisition.get_skill(transferred_skill_id)
        if not skill:
            raise ValueError(f"Skill not found: {transferred_skill_id}")

        # Find the transfer record for this skill (attempt lists are small,
        # so a linear scan is fine).
        transfer_record = next(
            (
                attempt for attempt in self.transfer_attempts
                if attempt["target_skill_id"] == transferred_skill_id
            ),
            None
        )
        if not transfer_record:
            return {"error": "No transfer record found"}

        # Proficiency gained (or lost) since the transfer happened.
        initial = transfer_record["initial_proficiency"]
        current = skill.proficiency
        proficiency_change = current - initial

        # Compare against the source skill. Guard both a missing source and
        # a zero-proficiency source (the latter previously raised
        # ZeroDivisionError).
        source = self.skill_acquisition.get_skill(transfer_record["source_skill_id"])
        if source and source.proficiency > 0:
            proficiency_ratio = current / source.proficiency
        else:
            proficiency_ratio = 0

        # Classify the observed outcome; small losses (within 0.1) count as
        # zero transfer rather than negative.
        if proficiency_change > 0:
            actual_type = TransferType.POSITIVE
        elif proficiency_change < -0.1:
            actual_type = TransferType.NEGATIVE
        else:
            actual_type = TransferType.ZERO

        return {
            "skill_id": transferred_skill_id,
            "skill_name": skill.name,
            "source_skill_id": transfer_record["source_skill_id"],
            "transfer_date": transfer_record["timestamp"],
            "initial_proficiency": initial,
            "current_proficiency": current,
            "proficiency_change": proficiency_change,
            "proficiency_ratio_to_source": proficiency_ratio,
            "practice_count": skill.practice_count,
            "transfer_type": actual_type.value,
            "success": proficiency_change >= 0 and skill.practice_count > 0
        }

    def get_transfer_statistics(self) -> Dict[str, Any]:
        """Get overall transfer learning statistics."""
        if not self.transfer_attempts:
            return {"total_transfers": 0}

        successful = 0
        by_domain_pair = defaultdict(int)

        for attempt in self.transfer_attempts:
            try:
                evaluation = self.evaluate_transfer_success(attempt["target_skill_id"])
                if evaluation.get("success"):
                    successful += 1
                pair = f"{attempt['source_domain']} -> {attempt['target_domain']}"
                by_domain_pair[pair] += 1
            except:
                continue

        return {
            "total_transfers": len(self.transfer_attempts),
            "successful_transfers": successful,
            "success_rate": successful / len(self.transfer_attempts),
            "by_domain_pair": dict(by_domain_pair),
            "domain_mappings": {k: dict(v) for k, v in self.domain_mappings.items()}
        }


# =============================================================================
# UNIFIED PROCEDURAL MEMORY SYSTEM
# =============================================================================

class ProceduralMemorySystem:
    """
    Unified interface for AIVA's procedural memory capabilities.

    Coordinates skill acquisition, chunking, automatization tracking,
    motor program execution, error correction, and transfer learning.
    """

    def __init__(self, storage_backend: Optional[Any] = None):
        """
        Initialize the complete procedural memory system.

        Args:
            storage_backend: Optional persistent storage, passed through to
                the skill acquisition component.
        """
        # SkillAcquisition is the shared skill store; every other component
        # holds a reference to it rather than owning its own copy.
        self.skill_acquisition = SkillAcquisition(storage_backend)
        self.chunking_engine = ChunkingEngine(self.skill_acquisition)
        self.automatization_tracker = AutomatizationTracker(self.skill_acquisition)
        self.motor_program = MotorProgram(
            self.skill_acquisition,
            self.chunking_engine
        )
        self.error_correction = ErrorCorrection(self.skill_acquisition)
        self.transfer_learning = TransferLearning(self.skill_acquisition)

        logger.info("ProceduralMemorySystem initialized with all components")

    def learn_procedure(
        self,
        name: str,
        description: str,
        domain: str,
        steps: List[Dict[str, Any]]
    ) -> Skill:
        """
        Learn a new procedure from step definitions.

        Args:
            name: Procedure name
            description: What the procedure accomplishes
            domain: Domain the procedure belongs to
            steps: List of step definitions. Each dict requires "name" and
                "action"; "parameters", "preconditions", "postconditions",
                "expected_duration_ms" (default 1000) and
                "attention_required" (default 1.0) are optional.

        Returns:
            The created Skill
        """
        # Materialize each step definition through the step library so the
        # steps get registered IDs before the skill is assembled.
        procedure_steps = []
        for step_def in steps:
            step = self.skill_acquisition.define_step(
                name=step_def["name"],
                action=step_def["action"],
                parameters=step_def.get("parameters", {}),
                preconditions=step_def.get("preconditions", []),
                postconditions=step_def.get("postconditions", []),
                expected_duration_ms=step_def.get("expected_duration_ms", 1000),
                attention_required=step_def.get("attention_required", 1.0)
            )
            procedure_steps.append(step)

        return self.skill_acquisition.learn_skill(
            name=name,
            description=description,
            domain=domain,
            steps=procedure_steps
        )

    def execute_procedure(
        self,
        skill_id: str,
        context: Optional[Dict[str, Any]] = None
    ) -> ExecutionTrace:
        """
        Execute a learned procedure.

        Also feeds the execution's reaction time and attention load into the
        automatization tracker, and records any errors from the trace with
        the error-correction component.

        Args:
            skill_id: ID of the skill to execute
            context: Execution context

        Returns:
            Execution trace
        """
        trace = self.motor_program.execute_skill(skill_id, context)

        # Track automatization metrics: reaction time is the gap between
        # trace start and the first step's start.
        if trace.steps_executed:
            first_step = trace.steps_executed[0]
            start = datetime.fromisoformat(trace.start_time)
            step_start = datetime.fromisoformat(first_step["start_time"])
            reaction_time = int((step_start - start).total_seconds() * 1000)

            skill = self.skill_acquisition.get_skill(skill_id)
            # Guard against an empty step list: statistics.mean raises
            # StatisticsError on empty data.
            if skill and skill.steps:
                avg_attention = statistics.mean(
                    s.attention_required for s in skill.steps
                )
                self.automatization_tracker.record_execution(
                    skill_id,
                    reaction_time,
                    avg_attention
                )

        # Record errors, mapping the trace's free-form error type onto an
        # ErrorCategory (COMMISSION when nothing more specific matches).
        for error in trace.errors:
            error_type = error.get("type", "").lower()
            if "omission" in error_type:
                category = ErrorCategory.OMISSION
            elif "sequence" in error_type:
                category = ErrorCategory.SEQUENCE
            else:
                category = ErrorCategory.COMMISSION  # Default

            self.error_correction.record_error(
                skill_id=skill_id,
                step_id=error.get("step_id", "unknown"),
                category=category,
                description=error.get("message", "Unknown error"),
                context=trace.context
            )

        return trace

    def get_system_status(self) -> Dict[str, Any]:
        """
        Get comprehensive status of the procedural memory system.

        Returns:
            Status report aggregating every component's statistics.
        """
        return {
            "timestamp": datetime.now().isoformat(),
            "skill_statistics": self.skill_acquisition.get_skill_stats(),
            "automatization_report": self.automatization_tracker.get_automatization_report(),
            "error_statistics": self.error_correction.get_error_statistics(),
            "transfer_statistics": self.transfer_learning.get_transfer_statistics(),
            "chunks_defined": len(self.chunking_engine.chunks),
            "chunk_suggestions": len(self.chunking_engine.suggest_chunks())
        }

    def serialize(self) -> Dict[str, Any]:
        """
        Serialize the system state for persistence.

        Returns:
            A JSON-compatible dict accepted by :meth:`deserialize`.
        """
        return {
            "skills": {k: v.to_dict() for k, v in self.skill_acquisition.skills.items()},
            "steps": {k: v.to_dict() for k, v in self.skill_acquisition.steps_library.items()},
            "chunks": {k: v.to_dict() for k, v in self.chunking_engine.chunks.items()},
            "co_occurrence_matrix": dict(self.chunking_engine.co_occurrence_matrix),
            "error_records": [e.to_dict() for e in self.error_correction.error_records],
            "error_patterns": self.error_correction.error_patterns,
            "transfer_attempts": self.transfer_learning.transfer_attempts,
            "domain_mappings": {k: dict(v) for k, v in self.transfer_learning.domain_mappings.items()}
        }

    def deserialize(self, data: Dict[str, Any]):
        """
        Restore system state from serialized data.

        The input dict is not modified; entries are copied where conversion
        is needed.

        Args:
            data: A dict previously produced by :meth:`serialize`.
        """
        # Restore skills
        for skill_id, skill_data in data.get("skills", {}).items():
            self.skill_acquisition.skills[skill_id] = Skill.from_dict(skill_data)

        # Restore steps
        for step_id, step_data in data.get("steps", {}).items():
            self.skill_acquisition.steps_library[step_id] = ProcedureStep.from_dict(step_data)

        # Restore chunks
        for chunk_id, chunk_data in data.get("chunks", {}).items():
            self.chunking_engine.chunks[chunk_id] = Chunk.from_dict(chunk_data)

        # Restore co-occurrence matrix, re-wrapping inner dicts as
        # defaultdicts so later increments keep working.
        for k, v in data.get("co_occurrence_matrix", {}).items():
            self.chunking_engine.co_occurrence_matrix[k] = defaultdict(int, v)

        # Restore error data. Copy each record before converting the
        # category so the caller's input dicts are not mutated.
        for error_data in data.get("error_records", []):
            record = dict(error_data)
            record["category"] = ErrorCategory(record["category"])
            self.error_correction.error_records.append(ErrorRecord(**record))

        self.error_correction.error_patterns = data.get("error_patterns", {})

        # Restore transfer data
        self.transfer_learning.transfer_attempts = data.get("transfer_attempts", [])
        for k, v in data.get("domain_mappings", {}).items():
            self.transfer_learning.domain_mappings[k] = v

        logger.info("ProceduralMemorySystem state restored")


# =============================================================================
# EXAMPLE USAGE AND TESTS
# =============================================================================

if __name__ == "__main__":
    # Smoke-test / demo script: exercises every component of the system in
    # sequence. Each test builds on state created by the previous ones
    # (the shared `system` instance and the skills learned into it).
    print("=" * 70)
    print("AIVA Procedural Memory System - Test Suite")
    print("=" * 70)

    # Initialize the system (no storage backend: purely in-memory)
    system = ProceduralMemorySystem()

    # Test 1: Learn a procedure
    print("\n[TEST 1] Learning a new procedure...")

    # Five-step REST workflow; attention_required values are deliberately
    # varied so the automatization metrics in Test 5 have a spread to average.
    api_call_skill = system.learn_procedure(
        name="REST API Call",
        description="Make a REST API request with authentication",
        domain="api_integration",
        steps=[
            {
                "name": "Prepare Headers",
                "action": "set_headers",
                "parameters": {"content_type": "application/json"},
                "expected_duration_ms": 50,
                "attention_required": 0.3
            },
            {
                "name": "Add Authentication",
                "action": "add_auth_token",
                "parameters": {"token_type": "Bearer"},
                "preconditions": ["headers_set"],
                "expected_duration_ms": 100,
                "attention_required": 0.5
            },
            {
                "name": "Construct URL",
                "action": "build_url",
                "parameters": {"base_url": "https://api.example.com"},
                "expected_duration_ms": 30,
                "attention_required": 0.4
            },
            {
                "name": "Send Request",
                "action": "http_request",
                "parameters": {"method": "GET"},
                "preconditions": ["headers_set", "url_built"],
                "expected_duration_ms": 500,
                "attention_required": 0.6
            },
            {
                "name": "Parse Response",
                "action": "parse_json",
                "parameters": {},
                "postconditions": ["response_parsed"],
                "expected_duration_ms": 50,
                "attention_required": 0.4
            }
        ]
    )

    print(f"  Created skill: {api_call_skill.name} (ID: {api_call_skill.skill_id})")
    print(f"  Domain: {api_call_skill.domain}")
    print(f"  Steps: {len(api_call_skill.steps)}")
    print(f"  Stage: {api_call_skill.stage.value}")

    # Test 2: Execute the procedure
    print("\n[TEST 2] Executing procedure...")

    trace = system.execute_procedure(
        api_call_skill.skill_id,
        context={"target_endpoint": "/users"}
    )

    print(f"  Execution trace ID: {trace.trace_id}")
    print(f"  Steps executed: {len(trace.steps_executed)}")
    print(f"  Success: {trace.success}")
    print(f"  Errors: {len(trace.errors)}")

    # Test 3: Practice to improve proficiency
    print("\n[TEST 3] Practicing skill multiple times...")

    # Repeated executions should raise proficiency and may advance the
    # skill's learning stage.
    for i in range(10):
        trace = system.execute_procedure(api_call_skill.skill_id)

    skill = system.skill_acquisition.get_skill(api_call_skill.skill_id)
    print(f"  Practice count: {skill.practice_count}")
    print(f"  Proficiency: {skill.proficiency:.3f}")
    print(f"  Stage: {skill.stage.value}")

    # Test 4: Create and apply chunks
    print("\n[TEST 4] Creating chunks...")

    # Combine the first three steps (header/auth/URL setup) into one chunk
    step_ids = [s.step_id for s in api_call_skill.steps[:3]]

    chunk = system.chunking_engine.create_chunk(
        name="Setup Request",
        chunk_type=ChunkType.SEQUENCE,
        step_ids=step_ids
    )

    print(f"  Created chunk: {chunk.name}")
    print(f"  Type: {chunk.chunk_type.value}")
    print(f"  Component steps: {len(chunk.component_steps)}")

    # Apply chunk to skill
    system.chunking_engine.apply_chunk_to_skill(api_call_skill.skill_id, chunk.chunk_id)
    print(f"  Applied chunk to skill")

    # Test 5: Check automatization
    print("\n[TEST 5] Checking automatization...")

    # Uses the execution metrics recorded during Tests 2-3
    auto_metrics = system.automatization_tracker.calculate_automatization_index(
        api_call_skill.skill_id
    )

    print(f"  Automatization index: {auto_metrics['automatization_index']:.3f}")
    print(f"  Is automatic: {auto_metrics['is_automatic']}")
    print(f"  Data points: {auto_metrics['data_points']}")

    # Test 6: Record and analyze errors
    print("\n[TEST 6] Error correction...")

    # Manually record a timing error against the "Send Request" step (index 3)
    error = system.error_correction.record_error(
        skill_id=api_call_skill.skill_id,
        step_id=api_call_skill.steps[3].step_id,
        category=ErrorCategory.TIMING,
        description="Request timeout",
        context={"endpoint": "/users", "timeout_ms": 5000}
    )

    print(f"  Recorded error: {error.error_id}")

    analysis = system.error_correction.analyze_error_causes(api_call_skill.skill_id)
    print(f"  Total errors: {analysis['total_errors']}")
    print(f"  Hypothesis: {analysis['root_cause_hypothesis']}")

    # Test 7: Transfer learning
    print("\n[TEST 7] Transfer learning...")

    # Learn another similar skill (shares the set_headers and http_request
    # actions with the REST skill, so similarity should be non-trivial)
    graphql_skill = system.learn_procedure(
        name="GraphQL Query",
        description="Execute a GraphQL query",
        domain="graphql_integration",
        steps=[
            {
                "name": "Prepare Headers",
                "action": "set_headers",
                "parameters": {"content_type": "application/json"},
                "expected_duration_ms": 50
            },
            {
                "name": "Build Query",
                "action": "construct_query",
                "parameters": {"query_type": "query"},
                "expected_duration_ms": 100
            },
            {
                "name": "Send Request",
                "action": "http_request",
                "parameters": {"method": "POST"},
                "expected_duration_ms": 500
            }
        ]
    )

    # Calculate similarity
    similarity = system.transfer_learning.calculate_skill_similarity(
        api_call_skill.skill_id,
        graphql_skill.skill_id
    )

    print(f"  Skill similarity: {similarity['overall_similarity']:.3f}")
    print(f"  Predicted transfer type: {similarity['predicted_transfer_type']}")

    # Transfer API skill to WebSocket domain
    transferred = system.transfer_learning.transfer_skill(
        api_call_skill.skill_id,
        target_domain="websocket_integration"
    )

    print(f"  Transferred skill: {transferred.name}")
    print(f"  Initial proficiency: {transferred.proficiency:.3f}")

    # Test 8: Get system status
    print("\n[TEST 8] System status...")

    status = system.get_system_status()
    print(f"  Total skills: {status['skill_statistics']['total_skills']}")
    print(f"  Total steps: {status['skill_statistics']['total_steps_defined']}")
    print(f"  Chunks defined: {status['chunks_defined']}")
    print(f"  Errors recorded: {status['error_statistics']['total_errors']}")

    # Test 9: Serialization
    print("\n[TEST 9] Testing serialization...")

    serialized = system.serialize()
    print(f"  Serialized {len(serialized['skills'])} skills")
    print(f"  Serialized {len(serialized['steps'])} steps")
    print(f"  Serialized {len(serialized['chunks'])} chunks")

    # Create new system and restore: round-trips the state through
    # serialize()/deserialize() and checks a skill survived intact
    new_system = ProceduralMemorySystem()
    new_system.deserialize(serialized)

    restored_skill = new_system.skill_acquisition.get_skill(api_call_skill.skill_id)
    print(f"  Restored skill: {restored_skill.name}")
    print(f"  Restored proficiency: {restored_skill.proficiency:.3f}")

    print("\n" + "=" * 70)
    print("All tests completed successfully!")
    print("=" * 70)