# evolution_engine_v2.py
import json
import subprocess
from pathlib import Path
from typing import List, Dict, Optional
import sys
from datetime import datetime
import random
import time

# Add core to sys.path
sys.path.append("e:/genesis-system/core")

try:
    from genesis_heartbeat import AxiomGenerator, SurpriseEvent, SurpriseLevel
except ImportError:
    # genesis_heartbeat is unavailable outside the Genesis workspace; provide
    # minimal no-op stand-ins so the module still imports and runs.
    class SurpriseLevel:
        # Only the level actually used by this module.
        SURPRISING = "surprising"

    class SurpriseEvent:
        def __init__(self, **kwargs):
            # Accept any keyword args and expose them as attributes.
            self.__dict__.update(kwargs)

    class AxiomGenerator:
        def __init__(self):
            pass

        def generate_axiom(self, *args, **kwargs):
            # Stub never produces an axiom; callers treat None as "deferred".
            return None

class EvolutionEngineV2:
    """
    Genesis Evolution Engine v2.0
    Tracks performance, identifies opportunities, A/B tests improvements, and rolls back failures.
    """

    # Explicit priority ordering for improvement proposals; lower rank = more urgent.
    # (A plain reverse string sort would rank "medium" above "high" alphabetically.)
    _PRIORITY_RANK = {"high": 0, "medium": 1, "low": 2}

    def __init__(self, workspace_path: str = "e:/genesis-system", metrics_file: str = "evolution_metrics.json"):
        """
        Args:
            workspace_path: Root of the Genesis workspace (knowledge graph, tools).
            metrics_file: Path of the JSON file that persists metrics across runs.
        """
        self.workspace = Path(workspace_path)
        self.kg_entities = self.workspace / "KNOWLEDGE_GRAPH" / "entities.jsonl"
        self.market_pathways = self.workspace / "KNOWLEDGE_GRAPH" / "MARKET_PATHWAYS.md"
        self.axiom_gen = AxiomGenerator()
        self.metrics_file = Path(metrics_file)
        self.metrics = self._load_metrics()
        self.improvement_proposals: List[Dict] = []  # Pending improvement ideas.

    def _load_metrics(self) -> Dict:
        """Loads metrics from file, initializes if file doesn't exist."""
        if self.metrics_file.exists():
            with open(self.metrics_file, "r") as f:
                return json.load(f)
        return {
            "video_processed_count": 0,
            "p5_consensus_success_rate": 1.0,  # Start at 1 to avoid initial division by zero.
            "axiom_generation_success_rate": 1.0,
            "kg_injection_success_rate": 1.0,
            "revenue_pipeline_proposal_count": 0,
            "failed_improvements": 0,
            "successful_improvements": 0,
            "last_updated": datetime.now().isoformat(),
        }

    def _save_metrics(self):
        """Saves metrics to file, stamping the update time."""
        self.metrics["last_updated"] = datetime.now().isoformat()
        with open(self.metrics_file, "w") as f:
            json.dump(self.metrics, f, indent=4)

    def _update_success_rate(self, key: str, success: bool):
        """
        Folds one success/failure observation into a running-average metric.

        Treats metrics[key] as the mean over the previous
        (video_processed_count - 1) observations and recomputes the mean
        including this observation. Same arithmetic for success and failure,
        replacing the six copy-pasted update expressions this class used to carry.
        """
        n = self.metrics["video_processed_count"]
        prev = self.metrics[key]
        self.metrics[key] = (prev * (n - 1) + (1 if success else 0)) / n

    def process_new_video(self, video_id: str, url: str):
        """Runs youtube_learner, integrates into KG, and tracks metrics."""
        print(f"--- Evolution Start: {video_id} ---")
        self.metrics["video_processed_count"] += 1

        # 1. Trigger YouTube Learner. Use sys.executable so the current
        # interpreter is used even when `python` is not on PATH.
        cmd = [sys.executable, str(self.workspace / "tools" / "youtube_learner.py"), "learn", url]
        learner_output = ""
        try:
            result = subprocess.run(cmd, capture_output=True, text=True)
            learner_output = result.stdout
            if result.returncode != 0:
                # Surface learner failures instead of silently ignoring them.
                print(f"⚠️ youtube_learner exited with code {result.returncode}: {result.stderr}")
        except OSError as e:
            # Interpreter/script missing; continue with empty output so the
            # gates below still record the failure in the metrics.
            print(f"⚠️ Could not launch youtube_learner: {e}")
        print(learner_output)

        # 2. Gate A: P5 Consensus Validation (Simulated Swarm Check)
        p5_success = self._run_p5_consensus(video_id, learner_output)
        self._update_success_rate("p5_consensus_success_rate", p5_success)
        if not p5_success:
            print(f"⚠️ EVOLUTION BLOCKED: P5 Consensus Gate failed for {video_id}.")
            self._save_metrics()
            return

        # 3. Axiomatization
        axiom_success = self._generate_video_axiom(video_id, learner_output or "No transcript available")
        self._update_success_rate("axiom_generation_success_rate", axiom_success)

        # 4. Inject into Knowledge Graph
        kg_success = self._inject_into_kg(video_id)
        self._update_success_rate("kg_injection_success_rate", kg_success)

        # 5. Trigger Revenue Pathway Discovery
        self._propose_revenue_pipeline(video_id)
        self.metrics["revenue_pipeline_proposal_count"] += 1

        self._save_metrics()
        self._assess_improvement_opportunities()

    def _run_p5_consensus(self, video_id: str, content: str) -> bool:
        """
        Hardening Gate A: Multi-agent consensus.
        Requires CONSENSUS_01 and CONSENSUS_02 to validate the finding.

        Returns True only when both simulated audits pass.
        """
        print(f"🕵️ Gate A: running CONSENSUS_01 & CONSENSUS_02 audit on {video_id}...")

        # In production, this would trigger two LLM calls with different system prompts
        # Agent 1: Optimistic (looking for value)
        # Agent 2: Skeptical (looking for hallucinations)

        agent_audit_1 = random.random() > 0.05  # Simulate ~5% failure rate per agent.
        agent_audit_2 = random.random() > 0.05

        consensus_reached = agent_audit_1 and agent_audit_2
        if consensus_reached:
            print(f"✅ P5 Consensus Reached: Findings for {video_id} are valid.")
        else:
            print(f"❌ P5 Consensus Failed: Findings for {video_id} are invalid.")
        return consensus_reached

    def _generate_video_axiom(self, video_id: str, content: str) -> bool:
        """Creates a patent-aligned axiom from video content. Returns True on success."""
        try:
            event = SurpriseEvent(
                event_id=f"YT_{video_id}",
                content=content[:500],  # Cap payload passed to the generator.
                source=f"youtube_{video_id}",
                timestamp=datetime.now().isoformat(),
                total_surprise=0.8,
                should_generate_axiom=True,
                level=SurpriseLevel.SURPRISING,
                prediction_error=0.5,
            )

            print(f"Generating Axiom for {video_id}...")
            axiom = self.axiom_gen.generate_axiom(event, content, domain="technical_evolution")
            if axiom:
                print(f"✓ Axiom Generated: {axiom.statement}")
                return True
            # Generator returning a falsy value means it declined (duplicate/missing key).
            print("! Axiom generation deferred (duplicate or key missing)")
            return False
        except Exception as e:
            # Best-effort step: record the failure, never abort the pipeline.
            print(f"✗ Axiom Generation failed: {e}")
            return False

    def _inject_into_kg(self, video_id: str) -> bool:
        """Injects video metadata into the knowledge graph (JSONL append). Returns True on success."""
        try:
            self.kg_entities.parent.mkdir(parents=True, exist_ok=True)
            new_node = {
                "id": f"YT_{video_id}",
                "type": "technology_enabler",
                "source": f"youtube_{video_id}",
                "relevance": "high",
                "patent_synergy": "P4, P7",
                "timestamp": datetime.now().isoformat(),
            }
            with open(self.kg_entities, "a", encoding="utf-8") as f:
                f.write(json.dumps(new_node) + "\n")
            return True
        except Exception as e:
            print(f"✗ KG Injection failed: {e}")
            return False

    def _propose_revenue_pipeline(self, video_id: str):
        """Appends a gated revenue-pipeline proposal to MARKET_PATHWAYS.md."""
        if not self.market_pathways.exists():
            # Create parent dirs too, so a fresh workspace doesn't crash here.
            self.market_pathways.parent.mkdir(parents=True, exist_ok=True)
            with open(self.market_pathways, "w", encoding="utf-8") as f:
                f.write("# Genesis Market Pathways\n\n")

        proposal = f"""
## Autonomous Pipeline Proposal (from YT_{video_id})
- **Concept**: Revenue Stream from new AI tools discovered via scout agent.
- **Target**: Founder Revenue Pipeline
- **Status**: GATED (Awaiting Founder Approval)
- **Hardening**: Verified by P5 Swarm Consensus.
- **Timestamp**: {datetime.now().isoformat()}
"""
        with open(self.market_pathways, "a", encoding="utf-8") as f:
            f.write(proposal)

    def _assess_improvement_opportunities(self):
        """Analyzes metrics and proposes improvements when a success rate drops below 95%."""
        print("🔎 Assessing Improvement Opportunities...")

        if self.metrics["p5_consensus_success_rate"] < 0.95:
            proposal = {
                "metric": "p5_consensus_success_rate",
                "description": "Investigate improving P5 Consensus reliability.  Possible strategies: refine agent prompts, add additional agents, improve input sanitization.",
                "priority": "high",
                "test_plan": "A/B test new prompt strategies against baseline.  Monitor consensus rate and processing time.",
            }
            self.improvement_proposals.append(proposal)
            print(f"💡 Proposed Improvement: {proposal['description']}")

        if self.metrics["axiom_generation_success_rate"] < 0.95:
            proposal = {
                "metric": "axiom_generation_success_rate",
                "description": "Investigate improving Axiom Generation. Possible strategies: improve content parsing, enhance error handling, refine axiom generation prompts.",
                "priority": "medium",
                "test_plan": "A/B test improved parsing logic on a sample of failed videos. Monitor axiom generation success rate.",
            }
            self.improvement_proposals.append(proposal)
            print(f"💡 Proposed Improvement: {proposal['description']}")

        if self.improvement_proposals:
            self._prioritize_improvements()

    def _prioritize_improvements(self):
        """Orders improvement proposals by priority (high first)."""
        # BUGFIX: the previous reverse string sort ranked "medium" above "high"
        # (plain alphabetical order). Sort by explicit rank; unknown priorities last.
        self.improvement_proposals.sort(
            key=lambda p: self._PRIORITY_RANK.get(p["priority"], len(self._PRIORITY_RANK))
        )
        print("✨ Prioritized Improvement Proposals:")
        for i, proposal in enumerate(self.improvement_proposals, start=1):
            print(f"  {i}. {proposal['description']} (Priority: {proposal['priority']})")

    def _test_improvement(self, proposal: Dict) -> bool:
        """Simulates A/B testing of an improvement proposal."""
        print(f"🧪 Testing Improvement: {proposal['description']}")
        time.sleep(1)  # Simulate testing

        # Simulate success/failure based on a random chance
        success = random.random() > 0.3  # Simulate 70% chance of success

        if success:
            print("✅ Improvement Test Passed!")
        else:
            print("❌ Improvement Test Failed.")
        return success

    def _deploy_improvement(self, proposal: Dict):
        """Deploys a successful improvement and counts it in the metrics."""
        print(f"🚀 Deploying Improvement: {proposal['description']}")
        # In a real system, this would involve updating code, configurations, etc.
        self.metrics["successful_improvements"] += 1
        print("🎉 Improvement Deployed Successfully!")

    def _rollback_improvement(self, proposal: Dict):
        """Rolls back a failed improvement and counts it in the metrics."""
        print(f"⏪ Rolling Back Improvement: {proposal['description']}")
        # In a real system, this would involve reverting code, configurations, etc.
        self.metrics["failed_improvements"] += 1
        print("⚠️ Improvement Rolled Back.")

    def run_evolution_cycle(self, video_id: str, url: str):
        """Runs a full evolution cycle: process video, assess opportunities, test & deploy improvements."""
        self.process_new_video(video_id, url)

        if self.improvement_proposals:
            proposal = self.improvement_proposals.pop(0)  # Take the highest priority

            if self._test_improvement(proposal):
                self._deploy_improvement(proposal)
            else:
                self._rollback_improvement(proposal)

            self.improvement_proposals = []  # Clear the list after processing one proposal.

        print(f"📊 Current Metrics: {self.metrics}")


if __name__ == "__main__":
    engine = EvolutionEngineV2()
    # Two consecutive cycles: the second one can pick up improvement
    # proposals generated from the first cycle's metrics.
    demo_runs = [
        ("vqHBfe3r4OQ", "https://www.youtube.com/watch?v=vqHBfe3r4OQ"),
        ("another_video", "https://www.youtube.com/watch?v=another_video"),
    ]
    for vid, vid_url in demo_runs:
        engine.run_evolution_cycle(vid, vid_url)