"""
Ralph Wiggum TRUE Method Orchestrator
=====================================
Executes PRD user stories US-001 through US-018 via fresh Claude Opus API calls.

Each story gets:
- Fresh API session (no conversation history)
- Full codebase context
- Response saved to /logs/US-{number}_output.md
- Progress tracked in progress.md

"I'm helping! Fresh context every time!" - Ralph Wiggum
"""

import os
import re
import json
import time
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, List, Any

try:
    import anthropic
except ImportError:
    # Best-effort bootstrap: install the SDK into the *current* interpreter.
    # `sys.executable -m pip` guarantees we install for the Python actually
    # running this script (a bare `pip` may belong to another install).
    import subprocess
    import sys
    print("Installing anthropic...")
    subprocess.run([sys.executable, "-m", "pip", "install", "anthropic"], check=False)
    import anthropic


# Configuration
# SECURITY: never hard-code API keys in source files -- anyone with read
# access to the repo gets the key.  Read it from the environment instead
# (export ANTHROPIC_API_KEY before running).  Any key previously embedded
# here must be considered compromised and rotated.
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", "")
PRD_PATH = Path("/mnt/e/genesis-system/RALPH WIGGUM/GENESIS_RALPH_TRUE_METHOD_PRD.md")
LOGS_DIR = Path("/mnt/e/genesis-system/logs")
PROGRESS_FILE = Path("/mnt/e/genesis-system/logs/ralph_progress.md")
GENESIS_ROOT = Path("/mnt/e/genesis-system")

# Model configuration
MODEL = "claude-sonnet-4-20250514"  # Using Sonnet for cost efficiency
MAX_TOKENS = 8192


class RalphOrchestrator:
    """
    Orchestrates execution of PRD user stories via fresh Claude API calls.

    Key principle: Each story = fresh API call with clean context.
    No conversation history accumulation.  Isolation comes from the request
    itself -- every call sends a one-message `messages` list -- not from
    recreating the HTTP client (the Messages API is stateless per request).
    """

    def __init__(self):
        # One client reused for all stories; see class docstring for why
        # this does not compromise per-story isolation.
        self.client = anthropic.Anthropic(
            api_key=ANTHROPIC_API_KEY,
            timeout=120.0  # 2 minute timeout per request
        )
        self.prd_content = ""
        self.stories: List[Dict[str, Any]] = []  # parsed stories, PRD order
        self.completed: List[str] = []           # story IDs that succeeded
        self.failed: List[str] = []              # story IDs that errored

        # Ensure logs directory exists
        LOGS_DIR.mkdir(parents=True, exist_ok=True)

        print("=" * 60)
        print("RALPH WIGGUM TRUE METHOD ORCHESTRATOR")
        print("Fresh API Call Per Story - No Context Accumulation")
        print("=" * 60)

    def load_prd(self) -> bool:
        """Load the PRD document and parse its user stories.

        Returns:
            True if the PRD file exists and was parsed, False otherwise.
        """
        print(f"\n[1] Loading PRD from {PRD_PATH}...")

        if not PRD_PATH.exists():
            print(f"ERROR: PRD not found at {PRD_PATH}")
            return False

        self.prd_content = PRD_PATH.read_text(encoding='utf-8')
        print(f"    PRD loaded: {len(self.prd_content)} characters")

        # Parse user stories
        self._parse_stories()
        print(f"    Found {len(self.stories)} user stories")

        return True

    def _parse_stories(self) -> None:
        """Extract user stories from ``self.prd_content`` into ``self.stories``.

        Stories are headed by ``#### **US-XXX: Title**`` lines; each story's
        content runs from its header to the next ``####`` header (or EOF).
        """
        # Pattern to match US-XXX story headers (title stops at the closing **)
        header_pattern = r'#### \*\*US-(\d{3}): ([^*]+)\*\*'
        headers = re.findall(header_pattern, self.prd_content)

        for num, title in headers:
            story_id = f"US-{num}"

            # Re-locate this header and capture everything up to the next
            # "####" heading (non-greedy DOTALL) so the stored content
            # includes the header plus the story body.
            section_pattern = (
                rf'#### \*\*{story_id}: {re.escape(title)}\*\*(.*?)(?=####|\Z)'
            )
            section = re.search(section_pattern, self.prd_content, re.DOTALL)

            if section:
                self.stories.append({
                    "id": story_id,
                    "number": num,
                    "title": title.strip(),
                    "content": section.group(0).strip(),
                })

    def get_codebase_context(self) -> str:
        """Gather relevant codebase context for the AI.

        Returns a markdown string with the first 3k characters of a few key
        files plus a listing of the core/ directory.  Missing or unreadable
        files are skipped silently -- this context is best-effort by design.
        """
        context_parts = []

        # Key files to include
        key_files = [
            "core/api_token_manager.py",
            "core/aiva_orchestrator.py",
            "HANDOFF.md",
            "CLAUDE.md",
        ]

        for file_path in key_files:
            full_path = GENESIS_ROOT / file_path
            if full_path.exists():
                try:
                    # First 3k chars only -- keeps prompt size bounded.
                    content = full_path.read_text(encoding='utf-8')[:3000]
                    context_parts.append(f"### {file_path}\n```python\n{content}\n```\n")
                except Exception:
                    # Deliberate best-effort: an unreadable file should not
                    # block story execution.
                    pass

        # Directory structure (first 20 core modules)
        try:
            core_files = list((GENESIS_ROOT / "core").glob("*.py"))[:20]
            dir_listing = "\n".join([f"  - {f.name}" for f in core_files])
            context_parts.append(f"### Core Directory Structure\n{dir_listing}\n")
        except Exception:
            pass  # best-effort, same as above

        return "\n".join(context_parts)

    def execute_story(self, story: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a single user story via a fresh Claude API call.

        CRITICAL: the request carries no conversation history -- only this
        story's prompt -- so each story is processed in clean context.  The
        underlying HTTP client is reused; the API itself is stateless.

        Args:
            story: Parsed story dict ("id", "number", "title", "content").

        Returns:
            Result dict: "story_id", "success", "duration", plus token
            counts / "output_file" on success or "error" on failure.
        """
        story_id = story["id"]
        story_title = story["title"]
        story_content = story["content"]

        print(f"\n{'='*60}")
        print(f"Processing {story_id}: {story_title}")
        print(f"{'='*60}")
        print(f"Making FRESH API call to Claude {MODEL}...")

        # Get codebase context
        codebase_context = self.get_codebase_context()

        # Build the prompt
        prompt = f"""You are implementing a user story for the Genesis AI system.

## User Story
{story_content}

## Codebase Context
{codebase_context}

## Instructions
1. Analyze the acceptance criteria carefully
2. Provide the COMPLETE implementation code for all files mentioned
3. Include any necessary imports and dependencies
4. Explain your implementation decisions
5. Provide test commands to verify the implementation

## Output Format
Provide your response in this structure:

### Analysis
[Your understanding of what needs to be built]

### Implementation

#### File: [filename]
```python
[complete code]
```

### Dependencies
[Any pip packages or system dependencies needed]

### Verification Commands
```bash
[Commands to test the implementation]
```

### Notes
[Any important considerations or gotchas]
"""

        start_time = time.time()

        try:
            # Retry transient failures: connection drops AND rate limits
            # (both resolve on their own; everything else fails fast).
            max_retries = 3
            response = None

            for retry in range(max_retries):
                try:
                    if retry > 0:
                        print(f"    Retry {retry}/{max_retries-1}...")
                        time.sleep(5 * retry)  # linear backoff: 5s, 10s

                    # FRESH API CALL - no conversation history
                    response = self.client.messages.create(
                        model=MODEL,
                        max_tokens=MAX_TOKENS,
                        messages=[
                            {"role": "user", "content": prompt}
                        ]
                    )
                    break  # Success, exit retry loop

                except (anthropic.APIConnectionError, anthropic.RateLimitError) as e:
                    print(f"    Transient API error (attempt {retry+1}/{max_retries}): {e}")
                    if retry == max_retries - 1:
                        raise  # Retries exhausted -- surface to outer handler

            if response is None:
                # Defensive: unreachable (loop breaks or raises), but keeps
                # the failure mode explicit if the retry logic is edited.
                raise RuntimeError("No response received from API")

            duration = time.time() - start_time
            # Join all text blocks instead of indexing [0]: a response may
            # contain multiple (or zero) text content blocks.
            response_text = "".join(
                block.text for block in response.content if hasattr(block, "text")
            )

            print(f"    Response received in {duration:.1f}s")
            print(f"    Response length: {len(response_text)} characters")
            print(f"    Tokens used: {response.usage.input_tokens} in, {response.usage.output_tokens} out")

            # Save response to file
            output_file = LOGS_DIR / f"{story_id}_output.md"
            self._save_output(story, response_text, duration, response.usage, output_file)

            result = {
                "story_id": story_id,
                "success": True,
                "duration": duration,
                "tokens_in": response.usage.input_tokens,
                "tokens_out": response.usage.output_tokens,
                "output_file": str(output_file)
            }

            self.completed.append(story_id)
            print(f"    ✅ {story_id} COMPLETE - saved to {output_file.name}")

            return result

        except Exception as e:
            duration = time.time() - start_time
            error_msg = str(e)
            print(f"    ❌ ERROR: {error_msg}")

            # Save error to file so failed stories leave a paper trail too
            output_file = LOGS_DIR / f"{story_id}_output.md"
            self._save_error(story, error_msg, output_file)

            self.failed.append(story_id)

            return {
                "story_id": story_id,
                "success": False,
                "error": error_msg,
                "duration": duration
            }

    def _save_output(self, story: Dict, response: str, duration: float,
                     usage: Any, output_file: Path) -> None:
        """Write a successful story response as a markdown report.

        Args:
            story: Parsed story dict.
            response: Full model response text.
            duration: Wall-clock seconds for the API call.
            usage: Anthropic usage object (input_tokens / output_tokens).
            output_file: Destination path (overwritten if present).
        """
        content = f"""# {story['id']}: {story['title']}
**Generated:** {datetime.now().isoformat()}
**Model:** {MODEL}
**Duration:** {duration:.1f}s
**Tokens:** {usage.input_tokens} in / {usage.output_tokens} out

---

## Original Story

{story['content']}

---

## Claude Response

{response}

---

*Generated by Ralph Wiggum TRUE Method Orchestrator*
"""
        output_file.write_text(content, encoding='utf-8')

    def _save_error(self, story: Dict, error: str, output_file: Path) -> None:
        """Write a failed story's error as a markdown report.

        Args:
            story: Parsed story dict.
            error: Stringified exception message.
            output_file: Destination path (overwritten if present).
        """
        content = f"""# {story['id']}: {story['title']} - FAILED
**Generated:** {datetime.now().isoformat()}
**Model:** {MODEL}

---

## Error

```
{error}
```

---

## Original Story

{story['content']}

---

*Generated by Ralph Wiggum TRUE Method Orchestrator*
"""
        output_file.write_text(content, encoding='utf-8')

    def update_progress(self) -> None:
        """Rewrite the progress markdown file from current run state."""
        total = len(self.stories)
        completed = len(self.completed)
        failed = len(self.failed)
        remaining = total - completed - failed
        pct = (completed / total * 100) if total > 0 else 0

        # Pre-build the markdown list sections (clearer than embedding
        # chr(10).join hacks inside the f-string).
        completed_md = "\n".join(f"- ✅ {s}" for s in self.completed) or "- None yet"
        failed_md = "\n".join(f"- ❌ {s}" for s in self.failed) or "- None"
        finished = set(self.completed) | set(self.failed)
        remaining_md = "\n".join(
            f"- ⏳ {s['id']}: {s['title']}"
            for s in self.stories if s["id"] not in finished
        ) or "- None"
        outputs_md = "\n".join(f"- `logs/{s}_output.md`" for s in self.completed)

        content = f"""# Ralph Wiggum TRUE Method - Progress Report
**Updated:** {datetime.now().isoformat()}
**Model:** {MODEL}

## Summary

| Metric | Count |
|--------|-------|
| Total Stories | {total} |
| Completed | {completed} |
| Failed | {failed} |
| Remaining | {remaining} |
| Progress | {pct:.1f}% |

## Completed Stories
{completed_md}

## Failed Stories
{failed_md}

## Remaining Stories
{remaining_md}

## Output Files
{outputs_md}

---

*Ralph Wiggum TRUE Method: Fresh API Call Per Story*
"""
        PROGRESS_FILE.write_text(content, encoding='utf-8')
        print(f"\n📊 Progress updated: {PROGRESS_FILE}")

    def run(self, start_from: Optional[str] = None, limit: Optional[int] = None) -> Dict:
        """
        Run the orchestrator for all stories.

        Args:
            start_from: Story ID to start from (e.g., "US-002").  If the ID
                is not found, a warning is printed and the run starts from
                the first story.
            limit: Maximum number of stories to process.

        Returns:
            Summary dict: "success", "completed", "failed", "results"
            (or "error" when the PRD could not be loaded).
        """
        # Load PRD
        if not self.load_prd():
            return {"success": False, "error": "Failed to load PRD"}

        # Filter stories if start_from specified
        stories_to_run = self.stories
        if start_from:
            start_idx = next(
                (i for i, s in enumerate(self.stories) if s["id"] == start_from),
                None,
            )
            if start_idx is None:
                # Previously this fell back silently; surface it instead.
                print(f"    WARNING: {start_from} not found, starting from the beginning")
                start_idx = 0
            stories_to_run = self.stories[start_idx:]

        if limit:
            stories_to_run = stories_to_run[:limit]

        print(f"\n[2] Executing {len(stories_to_run)} stories...")
        print(f"    Start: {stories_to_run[0]['id'] if stories_to_run else 'N/A'}")
        print(f"    End: {stories_to_run[-1]['id'] if stories_to_run else 'N/A'}")

        results = []

        for i, story in enumerate(stories_to_run, 1):
            print(f"\n[{i}/{len(stories_to_run)}] ", end="")

            result = self.execute_story(story)
            results.append(result)

            # Update progress after each story
            self.update_progress()

            # Small delay between calls to be respectful to API
            if i < len(stories_to_run):
                print("    Waiting 2s before next story...")
                time.sleep(2)

        # Final summary
        print("\n" + "=" * 60)
        print("EXECUTION COMPLETE")
        print("=" * 60)
        print(f"✅ Completed: {len(self.completed)}")
        print(f"❌ Failed: {len(self.failed)}")
        print(f"📁 Outputs: {LOGS_DIR}")
        print(f"📊 Progress: {PROGRESS_FILE}")

        return {
            "success": len(self.failed) == 0,
            "completed": self.completed,
            "failed": self.failed,
            "results": results
        }


def main():
    """CLI entry point: parse arguments and run the orchestrator.

    Flags:
        --start/-s    Story ID to begin from (e.g., US-002).
        --limit/-l    Maximum number of stories to process.
        --dry-run/-d  Parse the PRD and list stories; make no API calls.

    Exits with status 1 when any story fails.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Ralph Wiggum TRUE Method Orchestrator")
    parser.add_argument("--start", "-s", help="Story ID to start from (e.g., US-002)")
    parser.add_argument("--limit", "-l", type=int, help="Maximum stories to process")
    parser.add_argument("--dry-run", "-d", action="store_true", help="Parse PRD only, don't execute")

    args = parser.parse_args()

    orchestrator = RalphOrchestrator()

    if args.dry_run:
        orchestrator.load_prd()
        print("\n[DRY RUN] Stories found:")
        for story in orchestrator.stories:
            print(f"  - {story['id']}: {story['title']}")
        return

    result = orchestrator.run(start_from=args.start, limit=args.limit)

    if not result["success"]:
        print(f"\n⚠️ Some stories failed: {result['failed']}")
        # exit() is a site-module convenience that may be absent (python -S,
        # frozen builds); raise SystemExit to set the exit code reliably.
        raise SystemExit(1)


if __name__ == "__main__":
    main()
