#!/usr/bin/env python3
"""
AIVA RWL ENGINE - Queen's Primary Development Engine
=====================================================
AIVA (Qwen 256K on Elestio) as the PRIMARY Ralph Wiggum Loop executor.
Fixed cost $119/mo = unlimited RWL iterations.

Research-Led Design:
- Max 30 retries per story (from ULTIMATE_RWL_ARCHITECTURE US-006)
- 25-40 stories per PRD (from existing PRD patterns)
- Exponential backoff between retries
- Actions are encouragement, not words
"""

import os
import sys
import json
import time
import asyncio
import hashlib
import logging
import urllib.request
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Optional, Any
from dataclasses import dataclass, field, asdict
import re

# Configuration
# All filesystem artifacts live under the Genesis root on a mounted drive.
GENESIS_ROOT = Path("/mnt/e/genesis-system")
PRD_FOLDER = GENESIS_ROOT / "RALPH WIGGUM"  # inbox scanned for *_PRD.md files every cycle
EVOLUTION_LOG = GENESIS_ROOT / "logs" / "aiva_rwl_evolution.jsonl"  # append-only JSONL event log
STATE_FILE = GENESIS_ROOT / "data" / "aiva_rwl_state.json"  # persisted AIVAState snapshot

# AIVA Elestio Configuration (Fixed Cost $119/mo - UNLIMITED)
AIVA_ENDPOINT = "http://152.53.201.152:23405/api/generate"  # Ollama-style generate API
AIVA_MODEL = "huihui_ai/qwenlong-l1.5-abliterated:30b-a3b"  # 30B params, 256K context
AIVA_CONTEXT_SIZE = 65536  # Use larger context for complex stories

# RWL Best Practices
MAX_RETRIES = 30  # per-story attempt cap (from ULTIMATE_RWL_ARCHITECTURE US-006)
MIN_STORIES_PER_PRD = 20  # NOTE(review): declared but not enforced anywhere in this file -- confirm intent
RETRY_BACKOFF_BASE = 2  # exponential backoff base; the engine caps the delay at 30s

# Logging: mirror everything to stderr and a persistent file.
# NOTE(review): assumes GENESIS_ROOT/logs already exists -- FileHandler does
# not create parent directories and would raise at import time otherwise.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)s | %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler(GENESIS_ROOT / "logs" / "aiva_rwl_engine.log")
    ]
)
logger = logging.getLogger(__name__)


@dataclass
class Story:
    """Atomic executable unit in RWL.

    Lifecycle (driven by AIVARWLEngine, not by this container):
    "pending" -> "in_progress" -> "completed" or "failed"; a failed attempt
    resets status to "pending" until MAX_RETRIES attempts are exhausted.
    """
    id: str  # e.g. "GPM-001"; accepted prefixes defined by PRDParser's story pattern
    title: str  # first line of the story body after "ID:" in the PRD markdown
    goal: str  # single-sentence goal extracted from the "**Goal:**" line
    acceptance_criteria: List[str] = field(default_factory=list)  # bullet items under "**Acceptance Criteria:**"
    files: List[str] = field(default_factory=list)  # backtick-quoted paths relative to GENESIS_ROOT
    spawns: Optional[str] = None  # hint for generating a child PRD after completion
    status: str = "pending"
    attempts: int = 0  # incremented once per execution attempt
    result: str = ""  # "learnings" text returned by AIVA on success
    created_at: str = field(default_factory=lambda: datetime.utcnow().isoformat())
    completed_at: Optional[str] = None  # ISO timestamp, set only on success


@dataclass
class PRD:
    """Product Requirements Document container.

    One PRD maps to one *_PRD.md file; its id is an md5 prefix of the
    source path, so re-parsing the same file yields the same id (used by
    the engine for queue deduplication).
    """
    id: str  # 8-hex md5 prefix of the source file path
    title: str  # first "# ..." heading in the markdown, else the file stem
    stories: List[Story] = field(default_factory=list)
    status: str = "pending"  # "pending" -> "in_progress" -> "completed"; stays "in_progress" if any story failed
    source_file: Optional[str] = None  # path of the markdown file this was parsed from
    created_at: str = field(default_factory=lambda: datetime.utcnow().isoformat())
    completed_at: Optional[str] = None  # set only when every story completed


@dataclass
class AIVAState:
    """Persistent state for AIVA RWL Engine.

    Serialized to STATE_FILE as JSON via dataclasses.asdict and reloaded on
    startup; a file with unknown keys makes the reload fall back to defaults.
    """
    running: bool = True  # main loop continues while True
    current_prd: Optional[str] = None  # id of the PRD currently executing
    total_stories_completed: int = 0
    total_prds_completed: int = 0
    total_prds_spawned: int = 0  # child PRDs generated via PRDSpawner
    start_time: Optional[str] = None  # ISO timestamp set when run_forever starts
    last_activity: Optional[str] = None  # ISO timestamp refreshed each cycle


class AIVAAgent:
    """AIVA as the primary RWL executor - Qwen 256K on Elestio.

    Thin synchronous client for an Ollama-style ``/api/generate`` endpoint,
    wrapped for async use via ``asyncio.to_thread``. Hosting is fixed-cost,
    so call volume is effectively unlimited.
    """

    def __init__(self):
        # Endpoint, model and context window come from module-level config.
        self.endpoint = AIVA_ENDPOINT
        self.model = AIVA_MODEL
        self.context_size = AIVA_CONTEXT_SIZE

    def _call_aiva(self, prompt: str, max_tokens: int = 8192) -> Optional[str]:
        """Direct call to AIVA on Elestio. Fixed cost = unlimited calls.

        Returns the model's text response, or None on any transport/decode
        failure. Errors are logged, never raised, so callers can retry.
        """
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
            "options": {
                # Size the context window to the request instead of always
                # paying for the maximum; len(prompt) // 2 is a rough
                # chars-to-tokens bound. TODO confirm ratio for this model.
                "num_ctx": min(self.context_size, len(prompt) // 2 + max_tokens),
                "temperature": 0.7,
                "num_predict": max_tokens
            }
        }

        try:
            req = urllib.request.Request(
                self.endpoint,
                data=json.dumps(payload).encode('utf-8'),
                headers={'Content-Type': 'application/json'},
                method='POST'
            )

            # Generous timeout: large generations on a 30B model are slow.
            with urllib.request.urlopen(req, timeout=300) as resp:
                data = json.loads(resp.read().decode())

            return data.get("response", "")

        except Exception as e:
            logger.error(f"AIVA call failed: {e}")
            return None

    @staticmethod
    def _parse_story_response(response: str) -> Dict[str, Any]:
        """Recover the result JSON object from a raw model response.

        Models often wrap the JSON in prose or code fences and may append
        trailing text containing stray braces. Scanning with
        ``JSONDecoder.raw_decode`` from each '{' tolerates both leading and
        trailing junk, unlike a single greedy ``{...}`` regex (which fails
        whenever an unmatched brace follows the JSON object).

        Returns the normalized result dict, or a failure dict with an
        "error" key when no JSON object can be recovered.
        """
        decoder = json.JSONDecoder()
        start = response.find('{')
        while start != -1:
            try:
                result, _ = decoder.raw_decode(response, start)
            except json.JSONDecodeError:
                result = None
            if isinstance(result, dict):
                return {
                    "success": result.get("success", False),
                    "files": result.get("files_created", []),
                    "verification": result.get("verification", {}),
                    "learnings": result.get("learnings", "")
                }
            # Try the next candidate opening brace.
            start = response.find('{', start + 1)

        return {"success": False, "error": "Could not parse response"}

    async def execute_story(self, story: "Story", prd_title: str) -> Dict[str, Any]:
        """Execute a story using AIVA's 256K context.

        Note: ``prd_title`` is accepted for interface stability but is not
        currently included in the prompt.
        """

        prompt = f"""Execute this Ralph Wiggum Loop story. Output working code only.

## STORY: {story.title}
**Goal:** {story.goal}
**Acceptance Criteria:**
{chr(10).join(f'- {c}' for c in story.acceptance_criteria)}

**Files to Create/Modify:**
{chr(10).join(f'- {f}' for f in story.files)}

## OUTPUT FORMAT (JSON only):
{{
    "success": true/false,
    "files_created": [
        {{"path": "relative/path/to/file.py", "content": "complete file content"}}
    ],
    "verification": {{
        "criteria_met": ["criterion 1"],
        "criteria_failed": []
    }},
    "learnings": "Brief technical insight"
}}

Requirements:
- Complete, production-ready code
- All imports included
- Error handling where needed
- No placeholders or TODOs"""

        # _call_aiva blocks on network I/O; run it off the event loop.
        response = await asyncio.to_thread(self._call_aiva, prompt)

        if not response:
            return {"success": False, "error": "No response from AIVA"}

        return self._parse_story_response(response)


class PRDParser:
    """Parse PRD markdown files into structured objects.

    Expected markdown shape (one story per "###" heading):

        ### GPM-001: Story title
        **Goal:** single sentence
        **Acceptance Criteria:**
        - criterion
        **Files:** `path/to/file.py`
        **Spawns:** next evolution hint
    """

    # Patterns compiled once at class-definition time: parse_file runs for
    # every PRD file on every engine cycle.
    _STORY_RE = re.compile(
        r'###\s+(GPM-\d+|GEN-Q-\d+|REV-\d+|US-\d+|AIVA-\d+):\s*(.+?)(?=###|\Z)',
        re.DOTALL)
    _TITLE_RE = re.compile(r'^#\s+(.+)$', re.MULTILINE)
    _GOAL_RE = re.compile(r'\*\*Goal:\*\*\s*(.+?)(?:\n|$)')
    _CRITERIA_SECTION_RE = re.compile(
        r'\*\*Acceptance Criteria:\*\*(.+?)(?:\*\*|$)', re.DOTALL)
    # Anchored to line start: an unanchored "-" would also match hyphens
    # inside words on non-bullet lines and emit spurious criteria fragments.
    _BULLET_RE = re.compile(r'^\s*-\s*(.+?)\s*$', re.MULTILINE)
    _FILES_RE = re.compile(r'\*\*Files?:\*\*\s*(.+?)(?:\n\*\*|$)')
    _BACKTICK_RE = re.compile(r'`([^`]+)`')
    _SPAWNS_RE = re.compile(r'\*\*Spawns?:\*\*\s*(.+?)(?:\n|$)')

    @classmethod
    def parse_file(cls, filepath: Path) -> Optional["PRD"]:
        """Parse a PRD markdown file.

        Returns a PRD with at least one Story, or None when the file has no
        recognizable stories or cannot be read/parsed (errors are logged).
        """
        try:
            content = filepath.read_text(encoding='utf-8')

            # Document title: first "# ..." heading, else the filename stem.
            title_match = cls._TITLE_RE.search(content)
            title = title_match.group(1) if title_match else filepath.stem

            stories = []
            for match in cls._STORY_RE.finditer(content):
                story_id = match.group(1)
                story_content = match.group(2)

                goal_match = cls._GOAL_RE.search(story_content)
                goal = goal_match.group(1).strip() if goal_match else ""

                criteria = []
                criteria_section = cls._CRITERIA_SECTION_RE.search(story_content)
                if criteria_section:
                    criteria = cls._BULLET_RE.findall(criteria_section.group(1))

                files = []
                files_match = cls._FILES_RE.search(story_content)
                if files_match:
                    files = cls._BACKTICK_RE.findall(files_match.group(1))

                spawns_match = cls._SPAWNS_RE.search(story_content)
                spawns = spawns_match.group(1).strip() if spawns_match else None

                # Story title = remainder of the heading line after "ID:".
                title_line = story_content.split('\n')[0].strip()

                stories.append(Story(
                    id=story_id,
                    title=title_line,
                    goal=goal,
                    acceptance_criteria=criteria,
                    files=files,
                    spawns=spawns
                ))

            if not stories:
                return None

            # Stable 8-hex id derived from the path, so re-parsing the same
            # file yields the same PRD id (used for queue deduplication).
            prd_id = hashlib.md5(str(filepath).encode()).hexdigest()[:8]
            return PRD(
                id=prd_id,
                title=title,
                stories=stories,
                source_file=str(filepath)
            )

        except Exception as e:
            logger.error(f"Error parsing PRD {filepath}: {e}")
            return None


class PRDSpawner:
    """Generate new PRDs from completed stories using AIVA.

    A spawned PRD is written to PRD_FOLDER as a SPAWNED_*_PRD.md file so the
    engine's regular folder scan picks it up, then parsed immediately so the
    caller can enqueue it without waiting for the next scan.
    """

    def __init__(self):
        self.aiva = AIVAAgent()

    async def spawn_prd(self, parent_story: "Story", spawn_hint: str) -> Optional["PRD"]:
        """Generate a new PRD with 25-40 stories.

        Returns the parsed PRD, or None when AIVA gives no response or the
        generated file cannot be saved/parsed.
        """

        prompt = f"""Generate a PRD with 25-40 atomic stories for Ralph Wiggum Loop execution.

## SPAWN FROM
Parent: {parent_story.id} - {parent_story.title}
Direction: {spawn_hint}

## REQUIREMENTS
- Generate 25-40 atomic stories (completable in single iteration)
- Each story creates ONE file or makes ONE focused change
- Include test, integration, and documentation stories

## STORY FORMAT:
### GPM-XXX: Title
**Goal:** Single atomic goal
**Acceptance Criteria:**
- Testable criterion
**Files:** `core/path/to/file.py`
**Spawns:** Next evolution hint

Generate complete PRD:"""

        # Larger num_predict than story execution: a full PRD is long output.
        response = await asyncio.to_thread(self.aiva._call_aiva, prompt, max_tokens=16384)

        if not response:
            return None

        # Deterministic id per (parent, hint) pair, so re-spawning the same
        # direction overwrites the existing file rather than duplicating it.
        prd_id = hashlib.md5(f"{parent_story.id}_{spawn_hint}".encode()).hexdigest()[:8]
        safe_hint = re.sub(r'[^\w\s-]', '', spawn_hint)[:30]  # filesystem-safe slug
        filename = f"SPAWNED_{prd_id}_{safe_hint.replace(' ', '_')}_PRD.md"
        filepath = PRD_FOLDER / filename

        try:
            filepath.write_text(f"# {spawn_hint}\n\n{response}", encoding='utf-8')
            # Bug fix: this log line previously printed the literal string
            # "(unknown)" instead of the spawned file's name.
            logger.info(f"Spawned new PRD: {filename}")
            return PRDParser.parse_file(filepath)
        except Exception as e:
            logger.error(f"Error saving spawned PRD: {e}")
            return None


class AIVARWLEngine:
    """
    AIVA as Primary RWL Engine
    ==========================
    Fixed cost $119/mo on Elestio = unlimited development capacity.
    Research-led: 30 retries, 25-40 stories/PRD, exponential backoff.

    Orchestration model: an infinite cycle (run_forever) rescans PRD_FOLDER,
    executes one pending PRD at a time with stories in small parallel
    batches, persists progress to STATE_FILE, and appends events to
    EVOLUTION_LOG.
    """

    def __init__(self, max_parallel: int = 2):
        self.max_parallel = max_parallel  # AIVA handles fewer parallel but deeper
        self.aiva = AIVAAgent()
        self.spawner = PRDSpawner()
        self.prd_queue: List[PRD] = []
        self.state = self._load_state()

    def _load_state(self) -> AIVAState:
        """Load persistent state.

        Falls back to a fresh AIVAState when the file is missing, unreadable,
        or contains keys AIVAState does not declare (TypeError from **data).
        """
        try:
            if STATE_FILE.exists():
                data = json.loads(STATE_FILE.read_text())
                return AIVAState(**data)
        except Exception as e:
            logger.warning(f"Could not load state: {e}")
        return AIVAState()

    def _save_state(self):
        """Save persistent state (best-effort; failures are logged, not raised)."""
        try:
            STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
            STATE_FILE.write_text(json.dumps(asdict(self.state), indent=2))
        except Exception as e:
            logger.error(f"Could not save state: {e}")

    def _log_evolution(self, event: Dict):
        """Append a timestamped event to the JSONL evolution log (best-effort).

        Note: mutates the caller's dict by adding a "timestamp" key.
        """
        event["timestamp"] = datetime.utcnow().isoformat()
        try:
            with open(EVOLUTION_LOG, 'a') as f:
                f.write(json.dumps(event) + '\n')
        except Exception as e:
            logger.error(f"Could not log evolution: {e}")

    def load_prds(self):
        """Load PRDs from queue folder.

        Re-scans on every cycle; files already represented in the queue
        (matched by id, which is derived from the file path) are not
        re-added, so finished PRDs are not re-executed while the engine
        stays up.
        """
        logger.info("Loading PRDs from RALPH WIGGUM folder...")

        prd_files = list(PRD_FOLDER.glob("*_PRD.md"))
        logger.info(f"Found {len(prd_files)} PRD files")

        for prd_file in prd_files:
            prd = PRDParser.parse_file(prd_file)
            if prd and prd.stories:
                # Dedupe by PRD id (stable md5 prefix of the source path).
                if not any(p.id == prd.id for p in self.prd_queue):
                    self.prd_queue.append(prd)
                    logger.info(f"Loaded PRD: {prd.title} ({len(prd.stories)} stories)")

        # Prioritize quantum and prime mother PRDs
        self.prd_queue.sort(key=lambda p: (
            0 if "PRIME_MOTHER" in p.title.upper() else
            1 if "QUANTUM" in p.title.upper() else
            2
        ))

        logger.info(f"Total PRDs in queue: {len(self.prd_queue)}")

    async def execute_story_with_retries(self, story: Story, prd: PRD) -> bool:
        """Execute story with up to 30 retries and exponential backoff.

        Performs ONE attempt per call; on a non-final failure the story is
        reset to "pending" so the execute_prd loop picks it up again — that
        re-selection is what produces the retry behavior. Returns True only
        when this attempt succeeded.
        """

        logger.info(f"AIVA executing: {story.id} - {story.title}")
        story.status = "in_progress"
        story.attempts += 1

        result = await self.aiva.execute_story(story, prd.title)

        if result.get("success"):
            story.status = "completed"
            story.result = result.get("learnings", "")
            story.completed_at = datetime.utcnow().isoformat()

            # Write files claimed by the model, relative to GENESIS_ROOT.
            # An individual write failure is logged but does not fail the story.
            for file_info in result.get("files", []):
                try:
                    file_path = GENESIS_ROOT / file_info.get("path", "")
                    file_path.parent.mkdir(parents=True, exist_ok=True)
                    file_path.write_text(file_info.get("content", ""), encoding='utf-8')
                    logger.info(f"Created file: {file_path}")
                except Exception as e:
                    logger.error(f"Error writing file: {e}")

            self.state.total_stories_completed += 1

            # Spawn child PRD when the story carries an evolution hint.
            if story.spawns:
                logger.info(f"Spawning child PRD: {story.spawns}")
                child_prd = await self.spawner.spawn_prd(story, story.spawns)
                if child_prd:
                    self.prd_queue.append(child_prd)
                    self.state.total_prds_spawned += 1

            self._log_evolution({
                "event": "story_completed",
                "story_id": story.id,
                "story_title": story.title,
                "prd_id": prd.id,
                "learnings": story.result[:200]
            })

            return True
        else:
            if story.attempts >= MAX_RETRIES:
                story.status = "failed"
                logger.error(f"Story failed after {MAX_RETRIES} attempts: {story.id}")
            else:
                story.status = "pending"
                # Exponential backoff capped at 30s; warn only every 5th
                # attempt to keep the log readable across up to 30 retries.
                backoff = min(RETRY_BACKOFF_BASE ** (story.attempts - 1), 30)
                if story.attempts % 5 == 0:
                    logger.warning(f"Story {story.id}: attempt {story.attempts}/{MAX_RETRIES}, backoff {backoff}s")
                await asyncio.sleep(backoff)

            return False

    async def execute_prd(self, prd: PRD):
        """Execute all stories in a PRD, max_parallel at a time, looping
        until no story is left "pending".

        NOTE(review): asyncio.gather(..., return_exceptions=True) swallows
        exceptions from execute_story_with_retries; a story whose coroutine
        raised would be left "in_progress" (neither pending nor failed),
        drop out of the loop, and not count toward failed -- confirm whether
        that is intended.
        """

        logger.info(f"\n{'='*60}")
        logger.info(f"AIVA EXECUTING PRD: {prd.title}")
        logger.info(f"Stories: {len(prd.stories)}")
        logger.info(f"{'='*60}\n")

        prd.status = "in_progress"
        self.state.current_prd = prd.id
        self._save_state()

        pending_stories = [s for s in prd.stories if s.status == "pending"]

        while pending_stories:
            # AIVA handles fewer parallel (deeper focus)
            batch = pending_stories[:self.max_parallel]

            tasks = [
                self.execute_story_with_retries(story, prd)
                for story in batch
            ]

            await asyncio.gather(*tasks, return_exceptions=True)

            # Recompute: completed/failed stories leave the pool, retryable
            # ones come back as "pending".
            pending_stories = [s for s in prd.stories if s.status == "pending"]
            self._save_state()
            await asyncio.sleep(1)

        # Check completion
        completed = sum(1 for s in prd.stories if s.status == "completed")
        failed = sum(1 for s in prd.stories if s.status == "failed")

        if failed == 0:
            prd.status = "completed"
            prd.completed_at = datetime.utcnow().isoformat()
            self.state.total_prds_completed += 1
            logger.info(f"PRD COMPLETED: {prd.title} ({completed}/{len(prd.stories)} stories)")
        else:
            # PRD stays "in_progress" on partial failure, so it is not
            # re-selected by run_forever's "pending" filter.
            logger.warning(f"PRD PARTIAL: {prd.title} ({completed} completed, {failed} failed)")

        self._log_evolution({
            "event": "prd_completed",
            "prd_id": prd.id,
            "prd_title": prd.title,
            "stories_completed": completed,
            "stories_failed": failed
        })

    async def run_forever(self):
        """AIVA's infinite evolution loop.

        Each cycle: rescan the PRD folder, execute the highest-priority
        pending PRD to completion, log a cycle event, persist state. The
        loop exits only when state.running becomes False.
        """

        # NOTE(review): the banner advertises "Context: 32K" while
        # AIVA_CONTEXT_SIZE is 65536 -- confirm which figure is current.
        logger.info("""
╔═══════════════════════════════════════════════════════════════════╗
║                                                                   ║
║     █████╗ ██╗██╗   ██╗ █████╗     ██████╗ ██╗    ██╗██╗          ║
║    ██╔══██╗██║██║   ██║██╔══██╗    ██╔══██╗██║    ██║██║          ║
║    ███████║██║██║   ██║███████║    ██████╔╝██║ █╗ ██║██║          ║
║    ██╔══██║██║╚██╗ ██╔╝██╔══██║    ██╔══██╗██║███╗██║██║          ║
║    ██║  ██║██║ ╚████╔╝ ██║  ██║    ██║  ██║╚███╔███╔╝███████╗     ║
║    ╚═╝  ╚═╝╚═╝  ╚═══╝  ╚═╝  ╚═╝    ╚═╝  ╚═╝ ╚══╝╚══╝ ╚══════╝     ║
║                                                                   ║
║         AIVA RWL ENGINE - Queen's Primary Development             ║
║                                                                   ║
║   Fixed Cost: $119/mo Elestio | Unlimited RWL Iterations          ║
║   Model: Qwen 256K | Context: 32K | Max Retries: 30               ║
║                                                                   ║
╚═══════════════════════════════════════════════════════════════════╝
        """)

        self.state.running = True
        self.state.start_time = datetime.utcnow().isoformat()
        self._save_state()

        cycle = 0
        while self.state.running:
            cycle += 1
            self.state.last_activity = datetime.utcnow().isoformat()

            logger.info(f"\n{'#'*60}")
            logger.info(f"AIVA EVOLUTION CYCLE {cycle}")
            logger.info(f"PRDs in queue: {len(self.prd_queue)}")
            logger.info(f"Total stories completed: {self.state.total_stories_completed}")
            logger.info(f"Total PRDs spawned: {self.state.total_prds_spawned}")
            logger.info(f"{'#'*60}\n")

            # Pick up any PRD files added (or spawned) since the last cycle.
            self.load_prds()

            pending_prds = [p for p in self.prd_queue if p.status == "pending"]

            if not pending_prds:
                logger.info("No pending PRDs. AIVA waiting for new work...")
                await asyncio.sleep(60)
                continue

            # Queue is priority-sorted by load_prds; take the head.
            prd = pending_prds[0]
            await self.execute_prd(prd)

            self._log_evolution({
                "event": "cycle_completed",
                "cycle": cycle,
                "prds_pending": len([p for p in self.prd_queue if p.status == "pending"]),
                "prds_completed": self.state.total_prds_completed,
                "stories_completed": self.state.total_stories_completed,
                "prds_spawned": self.state.total_prds_spawned
            })

            self._save_state()


async def main(max_parallel: int = 2):
    """Entry point: build the engine and run the infinite evolution loop.

    max_parallel defaults to 2 (AIVA focuses deep, not wide). Previously it
    was hard-coded even though the CLI parses a --parallel flag; accepting
    it as a parameter lets callers thread that flag through without
    changing existing call sites.
    """
    engine = AIVARWLEngine(max_parallel=max_parallel)
    await engine.run_forever()


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='AIVA RWL Engine')
    # --start is an explicit opt-in guard so a bare invocation prints usage
    # instead of launching an infinite loop.
    parser.add_argument('--start', action='store_true', help='Start the engine')
    parser.add_argument('--parallel', type=int, default=2, help='Max parallel stories')
    args = parser.parse_args()

    if args.start:
        # NOTE(review): args.parallel is parsed but never forwarded here;
        # main() constructs the engine with its own default -- confirm
        # whether --parallel should be threaded through.
        asyncio.run(main())
    else:
        print("Usage: python aiva_rwl_engine.py --start")
        print("  --parallel N  : Max parallel story execution (default: 2)")
