#!/usr/bin/env python3
"""
Genesis Self-Improvement System
================================
Triggers self-improvement after task completion.

Implements the "execute and learn" pattern:
- Analyze task outcome
- Extract learnings
- Update expertise files
- Log improvements to DEVLOG

Usage:
    from self_improve import SelfImprover

    improver = SelfImprover()
    await improver.run_self_improve(
        domain="code_review",
        task_description="Reviewed authentication module",
        outcome="success",
        learnings=["Found pattern X is common", "Tool Y is effective"]
    )
"""

import yaml
import json
import asyncio
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional
from dataclasses import dataclass, field


@dataclass
class ImprovementRecord:
    """Record of a self-improvement action.

    Captures what task was performed, how it went, and what was learned,
    plus whether the domain's expertise file was actually updated.
    """
    # Expertise domain the task belongs to (e.g. "code_review").
    domain: str
    # Human-readable description of the task that was performed.
    task_description: str
    outcome: str  # "success", "failure", "partial"
    # Free-form lessons extracted from the task.
    learnings: List[str]
    # ISO-8601 creation time; default_factory gives each instance its own stamp.
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
    # Flipped to True by SelfImprover.run_self_improve after the expertise
    # file has been written to disk.
    expertise_updated: bool = False


class SelfImprover:
    """
    Self-improvement system that updates expertise after tasks.

    Key principle: "One executes and forgets, the other executes and learns."
    This system ensures Genesis LEARNS from every action.

    Persists three kinds of state:
    - Per-domain ``expertise.yaml`` files (learnings, patterns, issues, stats)
    - A rolling ``improvements.json`` history (last ``MAX_IMPROVEMENTS`` entries)
    - Human-readable entries appended to the DEVLOG markdown file
    """

    # Caps that keep the persisted files from growing without bound.
    MAX_LEARNINGS = 50
    MAX_IMPROVEMENTS = 100

    def __init__(
        self,
        expertise_path: str = "/mnt/e/genesis-system/expertise",
        devlog_path: str = "/mnt/e/genesis-system/DEVLOG.md"
    ):
        """
        Args:
            expertise_path: Root directory holding one subdirectory per domain.
            devlog_path: Markdown log file that receives append-only entries.
        """
        self.expertise_path = Path(expertise_path)
        self.devlog_path = Path(devlog_path)
        self.improvement_log_path = self.expertise_path / "improvements.json"
        self._load_improvement_log()

    def _load_improvement_log(self):
        """Load improvement history into ``self.improvements``.

        A missing, unreadable, or corrupt log is not fatal — we start fresh
        with an empty history rather than crashing at construction time.
        """
        self.improvements: List[Dict] = []
        if self.improvement_log_path.exists():
            try:
                with open(self.improvement_log_path, encoding="utf-8") as f:
                    loaded = json.load(f)
                # Guard against a hand-edited file with the wrong top-level type.
                if isinstance(loaded, list):
                    self.improvements = loaded
            except (OSError, json.JSONDecodeError):
                # Corrupt or unreadable history: deliberately start over.
                self.improvements = []

    def _save_improvement_log(self):
        """Persist the most recent improvement records to disk."""
        # The expertise root may not exist yet on a fresh install; without
        # this, open(..., 'w') raises FileNotFoundError.
        self.improvement_log_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.improvement_log_path, 'w', encoding="utf-8") as f:
            # Only the tail is persisted to bound the file's size.
            json.dump(self.improvements[-self.MAX_IMPROVEMENTS:], f, indent=2)

    def _get_expertise_file(self, domain: str) -> Path:
        """Return the expertise file path for a domain, creating its directory."""
        domain_path = self.expertise_path / domain
        domain_path.mkdir(parents=True, exist_ok=True)
        return domain_path / "expertise.yaml"

    def _load_expertise(self, domain: str) -> Dict[str, Any]:
        """Load the expertise mapping for a domain ({} if none exists yet)."""
        expertise_file = self._get_expertise_file(domain)
        if expertise_file.exists():
            with open(expertise_file, encoding="utf-8") as f:
                # safe_load returns None for an empty file; normalize to {}.
                return yaml.safe_load(f) or {}
        return {}

    def _save_expertise(self, domain: str, data: Dict[str, Any]):
        """Write the expertise mapping for a domain back to its YAML file."""
        expertise_file = self._get_expertise_file(domain)
        with open(expertise_file, 'w', encoding="utf-8") as f:
            # sort_keys=False preserves the section order we build in
            # run_self_improve; block style keeps the file human-editable.
            yaml.dump(data, f, default_flow_style=False, sort_keys=False)

    def _append_to_devlog(self, message: str, category: str = "learning"):
        """Append a timestamped entry to the DEVLOG markdown file.

        Args:
            message: The entry text.
            category: "learning" or "improvement" select a tag prefix;
                anything else logs the bare message.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        prefixes = {"learning": "LEARNED: ", "improvement": "SELF-IMPROVE: "}
        entry = f"- [{timestamp}] {prefixes.get(category, '')}{message}\n"

        with open(self.devlog_path, 'a', encoding="utf-8") as f:
            f.write(entry)

    async def run_self_improve(
        self,
        domain: str,
        task_description: str,
        outcome: str,
        learnings: List[str],
        patterns_discovered: Optional[List[str]] = None,
        issues_found: Optional[List[str]] = None
    ) -> ImprovementRecord:
        """
        Run self-improvement after task completion.

        Args:
            domain: The expertise domain (e.g., "code_review")
            task_description: What task was performed
            outcome: "success", "failure", or "partial"
            learnings: List of things learned
            patterns_discovered: New patterns to add to expertise.
                Re-discovering a known pattern increments its usage_count.
            issues_found: Issues to document for future reference

        Returns:
            ImprovementRecord of what was done
        """
        record = ImprovementRecord(
            domain=domain,
            task_description=task_description,
            outcome=outcome,
            learnings=learnings
        )

        # Load current expertise and make sure every section exists
        # before mutating it.
        expertise = self._load_expertise(domain)
        expertise.setdefault("learnings", [])
        expertise.setdefault("patterns", [])
        expertise.setdefault("issues", [])
        expertise.setdefault("stats", {"success": 0, "failure": 0, "partial": 0})

        # Update stats (an unrecognized outcome value gets its own counter).
        expertise["stats"][outcome] = expertise["stats"].get(outcome, 0) + 1
        expertise["last_updated"] = datetime.now().isoformat()

        # Add learnings, keeping only the most recent MAX_LEARNINGS.
        expertise["learnings"].extend(
            {
                "content": learning,
                "task": task_description,
                "timestamp": record.timestamp
            }
            for learning in learnings
        )
        expertise["learnings"] = expertise["learnings"][-self.MAX_LEARNINGS:]

        # Add newly discovered patterns. Re-discovering an existing pattern
        # bumps its usage_count instead of being silently dropped — otherwise
        # the usage_count field could never move past 1.
        if patterns_discovered:
            by_name = {p.get("name"): p for p in expertise["patterns"]}
            for pattern in patterns_discovered:
                existing = by_name.get(pattern)
                if existing is not None:
                    existing["usage_count"] = existing.get("usage_count", 0) + 1
                else:
                    entry = {
                        "name": pattern,
                        "discovered": record.timestamp,
                        "usage_count": 1
                    }
                    expertise["patterns"].append(entry)
                    by_name[pattern] = entry

        # Record open issues for future runs to address.
        if issues_found:
            expertise["issues"].extend(
                {
                    "description": issue,
                    "found": record.timestamp,
                    "resolved": False
                }
                for issue in issues_found
            )

        # Save updated expertise.
        self._save_expertise(domain, expertise)
        record.expertise_updated = True

        # Log a compact summary (first three learnings) to DEVLOG.
        learning_summary = "; ".join(learnings[:3])
        if len(learnings) > 3:
            learning_summary += f" (+{len(learnings)-3} more)"
        self._append_to_devlog(
            f"[{domain}] {outcome.upper()}: {learning_summary}",
            "learning"
        )

        # Save to improvement log.
        self.improvements.append({
            "domain": domain,
            "task": task_description,
            "outcome": outcome,
            "learnings": learnings,
            "timestamp": record.timestamp
        })
        self._save_improvement_log()

        return record

    def get_domain_stats(self, domain: str) -> Dict[str, Any]:
        """Get summary statistics for a domain (all zeros if unseen)."""
        expertise = self._load_expertise(domain)
        stats = expertise.get("stats", {})

        total = sum(stats.values())
        success_rate = stats.get("success", 0) / total if total > 0 else 0

        return {
            "domain": domain,
            "total_tasks": total,
            "success_rate": success_rate,
            "learnings_count": len(expertise.get("learnings", [])),
            "patterns_count": len(expertise.get("patterns", [])),
            "issues_count": len(expertise.get("issues", []))
        }

    def generate_self_improve_prompt(self, domain: str) -> str:
        """Generate a self-improvement analysis prompt for a domain."""
        expertise = self._load_expertise(domain)
        stats = self.get_domain_stats(domain)

        recent_learnings = expertise.get("learnings", [])[-5:]
        open_issues = [i for i in expertise.get("issues", []) if not i.get("resolved")]

        # Pre-render the bullet lists so the f-string below stays readable.
        learnings_md = "\n".join(
            f"- {item.get('content', '')}" for item in recent_learnings
        )
        issues_md = "\n".join(
            f"- {item.get('description', '')}" for item in open_issues
        )

        return f"""# Self-Improvement Analysis for {domain}

## Current Statistics
- Total tasks: {stats['total_tasks']}
- Success rate: {stats['success_rate']:.0%}
- Patterns discovered: {stats['patterns_count']}
- Open issues: {len(open_issues)}

## Recent Learnings
{learnings_md}

## Open Issues to Address
{issues_md}

## Self-Improvement Questions
1. What patterns appear repeatedly in the learnings?
2. Are there common failure modes that need new patterns?
3. What expertise is missing that would improve success rate?
4. Are the current patterns being applied consistently?

## Actions
Based on this analysis, update the expertise file with:
- New patterns discovered from analyzing learnings
- Solutions for recurring issues
- Updated best practices based on success patterns
"""


# CLI Interface
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("""
Genesis Self-Improvement System
================================

Commands:
  record <domain> <outcome> <learning>  Record a learning
  stats <domain>                        Show domain statistics
  prompt <domain>                       Generate self-improve prompt
  history [n]                           Show recent improvements

Examples:
  python self_improve.py record code_review success "Found that X pattern works well"
  python self_improve.py stats code_review
  python self_improve.py prompt testing
  python self_improve.py history 10
        """)
        sys.exit(0)

    improver = SelfImprover()
    command = sys.argv[1]

    if command == "record":
        # Distinguish "wrong arity" from "unknown command" so the user
        # gets an actionable message instead of "Unknown command: record".
        if len(sys.argv) < 5:
            print("Usage: record <domain> <outcome> <learning>")
            sys.exit(1)
        domain = sys.argv[2]
        outcome = sys.argv[3]
        learning = " ".join(sys.argv[4:])

        # run_self_improve is async; drive it to completion here.
        record = asyncio.run(improver.run_self_improve(
            domain=domain,
            task_description="CLI recorded learning",
            outcome=outcome,
            learnings=[learning]
        ))
        print(f"Recorded: {record.domain} - {outcome}")
        print(f"Expertise updated: {record.expertise_updated}")

    elif command == "stats" and len(sys.argv) > 2:
        domain = sys.argv[2]
        stats = improver.get_domain_stats(domain)
        print(json.dumps(stats, indent=2))

    elif command == "prompt" and len(sys.argv) > 2:
        domain = sys.argv[2]
        prompt = improver.generate_self_improve_prompt(domain)
        print(prompt)

    elif command == "history":
        # A non-integer count previously crashed with a raw ValueError
        # traceback; report it cleanly instead.
        try:
            n = int(sys.argv[2]) if len(sys.argv) > 2 else 10
        except ValueError:
            print(f"history: expected an integer count, got {sys.argv[2]!r}")
            sys.exit(1)
        for imp in improver.improvements[-n:]:
            print(f"[{imp['timestamp']}] {imp['domain']}: {imp['outcome']}")
            for learning in imp['learnings'][:2]:
                # Only append an ellipsis when the text was actually truncated.
                suffix = "..." if len(learning) > 60 else ""
                print(f"  - {learning[:60]}{suffix}")

    else:
        print(f"Unknown command: {command}")
        sys.exit(1)
