#!/usr/bin/env python3
"""
AIVA 24/7 Orchestrator
Autonomous orchestration of Genesis system using Redis task queue.

Components:
- Task Queue (Redis) - prioritized work items
- AIVA Brain (Ollama) - decision making
- Claude Bridge - execution via Claude Code
- Watchdog - health monitoring
- Slack Bridge - notifications to Kinan

Architecture:
┌─────────────────────────────────────────────────────────┐
│                    AIVA ORCHESTRATOR                     │
├─────────────────────────────────────────────────────────┤
│  ┌─────────┐    ┌─────────┐    ┌─────────────────────┐ │
│  │  Redis  │───▶│  AIVA   │───▶│   Claude Code       │ │
│  │  Queue  │    │  Brain  │    │   (4 terminals)     │ │
│  └─────────┘    └─────────┘    └─────────────────────┘ │
│       ▲              │                    │            │
│       │              ▼                    ▼            │
│  ┌─────────┐    ┌─────────┐    ┌─────────────────────┐ │
│  │ Watchdog│    │  Slack  │    │   Genesis Tools     │ │
│  │ Monitor │    │  Bridge │    │   (MCP Servers)     │ │
│  └─────────┘    └─────────┘    └─────────────────────┘ │
└─────────────────────────────────────────────────────────┘
"""

import os
import sys
import json
import time
import asyncio
import logging
import signal
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from enum import Enum
from pathlib import Path
import threading
import subprocess
import hashlib

# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

# GENESIS EXECUTION LAYER - ALL TASKS MUST FLOW THROUGH HERE
from core.genesis_execution_layer import get_execution_layer, ExecutionMode

# Load secrets FIRST before any config that uses environment variables.
# Simple KEY=VALUE env-file parser: skips blank lines and comments (including
# indented '#' lines, which the previous version mis-parsed as assignments),
# and strips surrounding double quotes from values.
_secrets_path = Path("/mnt/e/genesis-system/config/secrets.env")
if _secrets_path.exists():
    with open(_secrets_path) as _f:
        for _line in _f:
            _stripped = _line.strip()
            # Ignore blanks, comments, and lines without an assignment.
            if not _stripped or _stripped.startswith("#") or "=" not in _stripped:
                continue
            _key, _value = _stripped.split("=", 1)
            os.environ[_key] = _value.strip('"')

import redis
import httpx

# Configuration (secrets loaded above)
# Redis connection for the task queue; env vars (from secrets.env when present)
# override these defaults.
REDIS_CONFIG = {
    "host": os.environ.get("GENESIS_REDIS_HOST", "redis-genesis-u50607.vm.elestio.app"),
    "port": int(os.environ.get("GENESIS_REDIS_PORT", 26379)),
    "password": os.environ.get("GENESIS_REDIS_PASSWORD", ""),
    "decode_responses": True,  # get str back from Redis instead of bytes
}

# Ollama endpoint + basic-auth credentials for the AIVA brain model.
OLLAMA_CONFIG = {
    "host": os.environ.get("AIVA_OLLAMA_HOST", "ollama-genesis-aiva-u50607.vm.elestio.app"),
    "port": int(os.environ.get("AIVA_OLLAMA_PORT", 57887)),
    "user": os.environ.get("AIVA_OLLAMA_USER", "root"),
    "password": os.environ.get("AIVA_OLLAMA_PASS", ""),
    "model": os.environ.get("AIVA_MODEL", "huihui_ai/qwenlong-l1.5-abliterated:30b-a3b"),
}

# Redis Keys
QUEUE_KEY = "genesis:task_queue"        # sorted set: pending tasks, scored by priority
ACTIVE_KEY = "genesis:active_tasks"     # hash: task_id -> task JSON (in progress)
COMPLETED_KEY = "genesis:completed_tasks"  # hash: finished tasks
FAILED_KEY = "genesis:failed_tasks"        # hash: failed tasks
HEARTBEAT_KEY = "genesis:aiva_heartbeat"   # TTL'd liveness marker (see Watchdog)
CONFIG_KEY = "genesis:orchestrator_config"  # NOTE(review): not read in this file — confirm used elsewhere
METRICS_KEY = "genesis:metrics"             # NOTE(review): not read in this file — confirm used elsewhere

# Logging
# Logs to stdout AND a fixed file path; the data/ directory must already exist
# or FileHandler raises at import time.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('/mnt/e/genesis-system/data/aiva_orchestrator.log')
    ]
)
logger = logging.getLogger("AIVA")


class TaskPriority(Enum):
    """Task priority; lower numeric value drains first (see RedisTaskQueue scoring)."""
    CRITICAL = 1
    HIGH = 2
    MEDIUM = 3
    LOW = 4
    BACKGROUND = 5


class TaskStatus(Enum):
    """Lifecycle states stored in Task.status (string values, JSON-friendly)."""
    PENDING = "pending"
    ACTIVE = "active"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"  # NOTE(review): never assigned in this file — confirm used elsewhere


@dataclass
class Task:
    """A unit of work for AIVA to orchestrate.

    Callers may omit id/created_at and the container fields (or pass None);
    __post_init__ fills them in, so serialized round-trips stay stable.
    """
    id: str
    title: str
    description: str
    priority: int = TaskPriority.MEDIUM.value
    status: str = TaskStatus.PENDING.value
    created_at: str = ""    # ISO timestamp; auto-filled when empty
    started_at: str = ""    # set by RedisTaskQueue.dequeue
    completed_at: str = ""  # set by RedisTaskQueue.complete/fail
    assigned_to: str = ""  # Which Claude terminal or "gemini"
    max_iterations: int = 30
    context_files: Optional[List[str]] = None
    success_criteria: Optional[List[str]] = None
    result: str = ""
    error: str = ""
    metadata: Optional[Dict] = None
    source: str = ""  # Source of task (bootstrap, voice, manual, etc.)
    complexity: str = "simple"  # simple, medium, complex - determines executor

    def __post_init__(self):
        """Fill derived defaults: short hash id, timestamp, empty containers."""
        if not self.id:
            # 12-char id from title + wall-clock time; md5 used for brevity,
            # not security.
            self.id = hashlib.md5(f"{self.title}{time.time()}".encode()).hexdigest()[:12]
        if not self.created_at:
            self.created_at = datetime.now().isoformat()
        if self.context_files is None:
            self.context_files = []
        if self.success_criteria is None:
            self.success_criteria = []
        if self.metadata is None:
            self.metadata = {}

    def to_dict(self) -> Dict:
        """Serialize to a plain dict (JSON-safe; inverse of from_dict)."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict) -> 'Task':
        """Create Task from dict, ignoring unknown fields for backwards compatibility."""
        # __dataclass_fields__ keys are the field names; no need for inspect
        # (the previous `import inspect` was unused).
        valid_fields = set(cls.__dataclass_fields__)
        filtered_data = {k: v for k, v in data.items() if k in valid_fields}
        return cls(**filtered_data)


class RedisTaskQueue:
    """Priority task queue backed by a Redis sorted set plus hash buckets.

    Pending tasks live in a sorted set (lower score drains first); active,
    completed, and failed tasks are tracked in separate hashes keyed by id.
    """

    def __init__(self):
        self.redis = redis.Redis(**REDIS_CONFIG)
        self._verify_connection()

    def _verify_connection(self):
        """Ping Redis once at startup; re-raise so the process fails fast."""
        try:
            self.redis.ping()
            logger.info("Redis connection established")
        except redis.ConnectionError as exc:
            logger.error(f"Redis connection failed: {exc}")
            raise

    def enqueue(self, task: Task) -> str:
        """Add a task to the pending set and return its id."""
        payload = json.dumps(task.to_dict())
        # Priority dominates (x1e6); enqueue time breaks ties FIFO within
        # the same priority level.
        self.redis.zadd(QUEUE_KEY, {payload: task.priority * 1000000 + time.time()})
        logger.info(f"Enqueued task: {task.id} - {task.title} (priority {task.priority})")
        return task.id

    def dequeue(self) -> Optional[Task]:
        """Pop the highest-priority pending task and mark it active, or None."""
        popped = self.redis.zpopmin(QUEUE_KEY, count=1)
        if not popped:
            return None
        raw, _score = popped[0]
        task = Task.from_dict(json.loads(raw))
        task.status = TaskStatus.ACTIVE.value
        task.started_at = datetime.now().isoformat()
        # Record it in the active hash so it can be observed/recovered.
        self.redis.hset(ACTIVE_KEY, task.id, json.dumps(task.to_dict()))
        return task

    def _finalize(self, task: Task, bucket: str):
        """Move a task out of the active hash into the given terminal bucket."""
        self.redis.hdel(ACTIVE_KEY, task.id)
        self.redis.hset(bucket, task.id, json.dumps(task.to_dict()))

    def complete(self, task: Task, result: str = ""):
        """Record successful completion of a task."""
        task.status = TaskStatus.COMPLETED.value
        task.completed_at = datetime.now().isoformat()
        task.result = result
        self._finalize(task, COMPLETED_KEY)
        logger.info(f"Completed task: {task.id} - {task.title}")

    def fail(self, task: Task, error: str = ""):
        """Record failure of a task along with its error message."""
        task.status = TaskStatus.FAILED.value
        task.completed_at = datetime.now().isoformat()
        task.error = error
        self._finalize(task, FAILED_KEY)
        logger.warning(f"Failed task: {task.id} - {task.title}: {error}")

    def get_queue_length(self) -> int:
        """Number of tasks still waiting in the pending set."""
        return self.redis.zcard(QUEUE_KEY)

    def get_active_tasks(self) -> List[Task]:
        """All tasks currently marked active."""
        return [Task.from_dict(json.loads(raw)) for raw in self.redis.hvals(ACTIVE_KEY)]

    def get_stats(self) -> Dict:
        """Counts of tasks in every lifecycle bucket."""
        return {
            "pending": self.redis.zcard(QUEUE_KEY),
            "active": self.redis.hlen(ACTIVE_KEY),
            "completed": self.redis.hlen(COMPLETED_KEY),
            "failed": self.redis.hlen(FAILED_KEY),
        }


class AIVABrain:
    """AIVA's decision-making core using an Ollama-hosted model.

    All public helpers degrade gracefully: on any model/network failure they
    fall back to a neutral answer instead of raising into the main loop.
    """

    def __init__(self):
        self.base_url = f"http://{OLLAMA_CONFIG['host']}:{OLLAMA_CONFIG['port']}"
        self.auth = (OLLAMA_CONFIG['user'], OLLAMA_CONFIG['password'])
        self.model = OLLAMA_CONFIG['model']
        self.system_prompt = self._load_system_prompt()

    def _load_system_prompt(self) -> str:
        """Load AIVA's system prompt from disk, with an embedded fallback."""
        prompt_path = Path("/mnt/e/genesis-system/AIVA_CORE/SYSTEM_PROMPT.md")
        if prompt_path.exists():
            return prompt_path.read_text()
        return """You are AIVA, the autonomous orchestrator of the Genesis system.
Your role is to:
1. Prioritize and assign tasks to Claude Code agents
2. Monitor progress and adjust strategies
3. Make autonomous decisions within your authority
4. Report significant events to Kinan
5. Ensure Genesis operates 24/7 without intervention

You have access to 4 Claude Code terminals and can delegate work intelligently."""

    async def think(self, context: str, question: str) -> str:
        """Ask the model a question with context; returns "" on any failure.

        Network/timeout errors are caught (they previously propagated and
        could kill the caller) and reported as an empty answer.
        """
        try:
            async with httpx.AsyncClient(auth=self.auth, timeout=120) as client:
                response = await client.post(
                    f"{self.base_url}/api/generate",
                    json={
                        "model": self.model,
                        "prompt": f"{self.system_prompt}\n\nContext:\n{context}\n\nQuestion: {question}",
                        "stream": False,
                    }
                )
                if response.status_code == 200:
                    return response.json().get("response", "")
                logger.error(f"Ollama error: {response.status_code}")
                return ""
        except httpx.HTTPError as e:
            logger.error(f"Ollama request failed: {e}")
            return ""

    async def prioritize_tasks(self, tasks: List[Dict]) -> List[Dict]:
        """Have AIVA prioritize a list of tasks; falls back to input order."""
        if not tasks:
            return []

        context = f"Current pending tasks:\n{json.dumps(tasks, indent=2)}"
        question = """Analyze these tasks and return them in priority order.
Consider: urgency, dependencies, strategic value, and resource requirements.
Return as JSON array with 'id' and 'priority' (1=critical, 5=background)."""

        response = await self.think(context, question)
        try:
            prioritized = json.loads(response)
        except (json.JSONDecodeError, TypeError):
            # Model returned non-JSON text (bare except narrowed to the two
            # errors json.loads can actually raise here); keep original order.
            return tasks
        # The model may return a JSON object instead of the requested array.
        return prioritized if isinstance(prioritized, list) else tasks

    async def decide_assignment(self, task: Task, terminals: List[str]) -> str:
        """Decide which terminal should handle a task; defaults to the first."""
        context = f"Task: {task.title}\nDescription: {task.description}\nAvailable terminals: {terminals}"
        question = "Which terminal should handle this task? Return just the terminal name."
        response = await self.think(context, question)
        if response:
            return response.strip()
        # Guard the empty-terminals edge case instead of raising IndexError.
        return terminals[0] if terminals else ""


class GeminiExecutor:
    """Cost-efficient Gemini 2.0 Flash executor for task work.

    Tracks cumulative API cost and applies exponential backoff when the API
    reports rate limiting / quota exhaustion.
    """

    def __init__(self):
        # Imported lazily so the module loads even where the SDK is absent.
        import google.generativeai as genai
        genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))
        self.model = genai.GenerativeModel("gemini-2.0-flash-exp")
        self.total_cost = 0.0
        # Rate limit handling
        self.rate_limited_until = None  # datetime until which calls are blocked
        self.backoff_seconds = 60  # Start with 1 minute
        self.max_backoff = 3600  # Max 1 hour backoff

    def is_rate_limited(self) -> bool:
        """Return True while inside a backoff window; auto-resets on expiry."""
        if self.rate_limited_until is None:
            return False
        if datetime.now() >= self.rate_limited_until:
            self.rate_limited_until = None
            self.backoff_seconds = 60  # Reset backoff
            logger.info("Rate limit lifted, resuming Gemini execution")
            return False
        remaining = (self.rate_limited_until - datetime.now()).total_seconds()
        logger.debug(f"Rate limited for {remaining:.0f} more seconds")
        return True

    def _handle_rate_limit(self, error_msg: str):
        """Apply exponential backoff for rate limits.

        Args:
            error_msg: Original API error text (currently unused; kept so
                existing call sites stay valid).
        """
        self.rate_limited_until = datetime.now() + timedelta(seconds=self.backoff_seconds)
        logger.warning(f"Gemini rate limited. Backing off for {self.backoff_seconds}s until {self.rate_limited_until}")
        # Exponential backoff with cap
        self.backoff_seconds = min(self.backoff_seconds * 2, self.max_backoff)

    async def execute(self, task: Dict) -> Dict:
        """Execute task with Gemini 2.0 Flash (50x cheaper than Claude).

        Returns a dict with "success" plus either result/cost/tokens, or an
        "error" string ("rate_limited" also carries "retry_after" seconds).
        """
        # Check rate limit before attempting
        if self.is_rate_limited():
            return {
                "success": False,
                "error": "rate_limited",
                "retry_after": (self.rate_limited_until - datetime.now()).total_seconds()
            }

        prompt = f"""You are a Genesis system worker. Execute this task completely.

TASK: {task.get('title', '')}
DESCRIPTION: {task.get('description', '')}

Requirements:
- Write complete, production-ready Python code
- Include all imports, error handling, logging
- Follow existing Genesis patterns
- Add type hints and docstrings

Output the complete solution now."""

        try:
            # The SDK call is synchronous; run it in a worker thread so the
            # orchestrator's event loop (heartbeats, Slack) is not blocked
            # for the duration of the generation.
            response = await asyncio.get_running_loop().run_in_executor(
                None, self.model.generate_content, prompt
            )
            tokens = response.usage_metadata.prompt_token_count + response.usage_metadata.candidates_token_count
            cost = (tokens / 1_000_000) * 0.25  # Average of input/output pricing
            self.total_cost += cost
            # Reset backoff on success
            self.backoff_seconds = 60

            return {
                "success": True,
                "result": response.text,
                "cost": cost,
                "tokens": tokens,
            }
        except Exception as e:
            error_str = str(e).lower()
            # Detect rate limit errors
            if "429" in error_str or "rate" in error_str or "quota" in error_str or "resource" in error_str:
                self._handle_rate_limit(str(e))
                return {"success": False, "error": "rate_limited", "retry_after": self.backoff_seconds}
            return {"success": False, "error": str(e)}

    def save_code(self, code: str, path: str) -> bool:
        """Extract code from optional markdown fences and write it to *path*.

        Returns True on success, False on failure (best-effort: the caller
        treats a failed save as non-fatal).
        """
        try:
            # Clean markdown code blocks
            if "```python" in code:
                code = code.split("```python")[1].split("```")[0]
            elif "```" in code:
                code = code.split("```")[1].split("```")[0]

            output_path = Path(path)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(code.strip())
            return True
        except (OSError, AttributeError, TypeError) as e:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows KeyboardInterrupt/SystemExit, and failures are logged.
            logger.error(f"save_code failed for {path}: {e}")
            return False


class ClaudeBridge:
    """Bridge to Claude Code terminals via tmux (for complex tasks only)."""

    def __init__(self):
        self.session_name = "genesis"
        self.terminals = ["claude1", "claude2", "claude3", "claude4"]

    def is_session_active(self) -> bool:
        """Check if the tmux session exists (False when tmux isn't installed).

        tmux exits 0 when the session exists. A missing tmux binary
        previously raised an uncaught FileNotFoundError here.
        """
        try:
            result = subprocess.run(
                ["tmux", "has-session", "-t", self.session_name],
                capture_output=True
            )
        except OSError:
            return False
        return result.returncode == 0

    def get_terminal_status(self, terminal: str) -> str:
        """Get status of a terminal: "idle", "busy", or "unknown".

        Heuristic: a shell/REPL prompt character (">" or "$") anywhere in the
        captured pane counts as idle. NOTE(review): any program output
        containing those characters also reads as idle — confirm acceptable.
        """
        try:
            result = subprocess.run(
                ["tmux", "capture-pane", "-t", f"{self.session_name}:{terminal}", "-p"],
                capture_output=True,
                text=True
            )
            output = result.stdout.strip()
            if ">" in output or "$" in output:
                return "idle"
            return "busy"
        except (OSError, subprocess.SubprocessError):
            # Narrowed from a bare except so Ctrl-C/SystemExit aren't swallowed.
            return "unknown"

    def send_command(self, terminal: str, command: str) -> bool:
        """Send a command line (plus Enter) to a terminal; True on success."""
        try:
            subprocess.run(
                ["tmux", "send-keys", "-t", f"{self.session_name}:{terminal}", command, "Enter"],
                check=True
            )
            logger.info(f"Sent to {terminal}: {command[:50]}...")
            return True
        except (OSError, subprocess.CalledProcessError) as e:
            # OSError added: a missing tmux binary previously crashed the caller.
            logger.error(f"Failed to send command to {terminal}: {e}")
            return False

    def get_idle_terminal(self) -> Optional[str]:
        """Return the first idle terminal name, or None if all are busy."""
        for terminal in self.terminals:
            if self.get_terminal_status(terminal) == "idle":
                return terminal
        return None

    def dispatch_task(self, task: Task, terminal: str) -> bool:
        """Compose the natural-language command for *task* and send it."""
        # Build the command
        if task.context_files:
            files_str = " ".join(task.context_files)
            command = f"Read {files_str} then {task.description}"
        else:
            command = task.description

        # Add iteration limit
        if task.max_iterations:
            command = f"{command} (max {task.max_iterations} iterations)"

        return self.send_command(terminal, command)


class Watchdog:
    """Liveness tracking via a TTL'd heartbeat key in Redis.

    The orchestrator calls pulse() each loop; if it stops pulsing, the key
    expires after `heartbeat_timeout` and external monitors see it as down.
    """

    def __init__(self, redis_client: redis.Redis):
        self.redis = redis_client
        self.check_interval = 60  # seconds between health checks
        self.heartbeat_timeout = 300  # heartbeat TTL: 5 minutes

    def pulse(self):
        """Write a fresh heartbeat that expires after `heartbeat_timeout`."""
        payload = json.dumps({
            "timestamp": datetime.now().isoformat(),
            "status": "alive",
            "pid": os.getpid(),
        })
        self.redis.setex(HEARTBEAT_KEY, self.heartbeat_timeout, payload)

    def is_healthy(self) -> bool:
        """True while a non-expired heartbeat exists."""
        return self.redis.get(HEARTBEAT_KEY) is not None

    def get_last_heartbeat(self) -> Optional[Dict]:
        """Return the decoded heartbeat payload, or None if expired/absent."""
        raw = self.redis.get(HEARTBEAT_KEY)
        return json.loads(raw) if raw else None


class SlackBridge:
    """Push notifications to Kinan through a Slack incoming webhook."""

    def __init__(self):
        self.webhook_url = os.environ.get("SLACK_WEBHOOK_URL", "")
        # Disabled automatically when no webhook URL is configured.
        self.enabled = bool(self.webhook_url)

    async def notify(self, message: str, level: str = "info"):
        """Post *message* to Slack; logs locally instead when disabled."""
        if not self.enabled:
            logger.info(f"[Slack disabled] {level}: {message}")
            return

        # Map severity level to a leading emoji; unknown levels get a megaphone.
        emoji = {"info": "ℹ️", "warning": "⚠️", "error": "🚨", "success": "✅"}.get(level, "📢")
        payload = {
            "text": f"{emoji} *AIVA*: {message}",
            "username": "AIVA Orchestrator",
        }
        async with httpx.AsyncClient() as client:
            await client.post(self.webhook_url, json=payload)

    async def daily_summary(self, stats: Dict):
        """Send the end-of-day stats rollup."""
        summary = f"""*Daily Genesis Summary*
• Tasks completed: {stats.get('completed', 0)}
• Tasks failed: {stats.get('failed', 0)}
• Tasks pending: {stats.get('pending', 0)}
• Uptime: {stats.get('uptime', 'unknown')}"""
        await self.notify(summary, "info")


class AIVAOrchestrator:
    """Main orchestrator class - runs 24/7.

    Model Stack Strategy:
    - Claude (AIVA Brain): High-level decisions, orchestration
    - Gemini 2.0 Flash: Actual task execution (50x cheaper)
    - Claude terminals: Only for complex multi-file tasks

    In practice _loop_iteration routes every task through the Genesis
    Execution Layer (see below); the Claude/Gemini executors are retained
    for save_code and terminal status reporting.
    """

    def __init__(self):
        # Component wiring: queue + brain + two executors + monitoring/Slack.
        self.queue = RedisTaskQueue()
        self.brain = AIVABrain()  # Claude for decisions
        self.gemini = GeminiExecutor()  # Gemini for work (cheap)
        self.claude = ClaudeBridge()  # Claude terminals for complex only
        self.watchdog = Watchdog(self.queue.redis)
        self.slack = SlackBridge()

        # GENESIS EXECUTION LAYER - HARDWIRED
        self.execution_layer = get_execution_layer()

        self.running = False
        self.start_time = datetime.now()
        self.loop_interval = 15  # faster loop for Gemini
        # NOTE(review): this flag is never read in this file; _loop_iteration
        # routes everything through the execution layer — confirm intent.
        self.use_gemini_for_simple = True  # Cost optimization flag

    async def start(self):
        """Start the orchestrator; blocks in the main loop until stop() flips `running`."""
        self.running = True
        logger.info("=" * 60)
        logger.info("AIVA ORCHESTRATOR STARTING")
        logger.info("=" * 60)

        await self.slack.notify("AIVA Orchestrator starting up", "info")

        # Main loop
        while self.running:
            try:
                await self._loop_iteration()
                self.watchdog.pulse()  # heartbeat only after a clean iteration
                await asyncio.sleep(self.loop_interval)
            except Exception as e:
                logger.error(f"Loop error: {e}")
                # NOTE(review): if this notify() itself raises (e.g. network
                # error), the exception escapes the handler and ends start() —
                # confirm that is acceptable for a 24/7 process.
                await self.slack.notify(f"Loop error: {e}", "error")
                await asyncio.sleep(60)  # Wait before retry

    async def _loop_iteration(self):
        """Single iteration of the main loop.

        GENESIS EXECUTION LAYER (HARDWIRED):
        ALL tasks now flow through the Genesis Execution Layer which:
        1. Auto-decomposes tasks into RWL stories
        2. Executes via Gemini swarm with rate maximization
        3. Self-verifies against acceptance criteria
        """
        # Check for available work
        queue_length = self.queue.get_queue_length()
        if queue_length == 0:
            return

        # Get next task (may still be None if another consumer raced us;
        # dequeue returns None on an empty queue, so this is benign)
        task = self.queue.dequeue()
        if not task:
            return

        # GENESIS EXECUTION LAYER - ALL TASKS FLOW THROUGH HERE
        logger.info(f"Executing task {task.id} via GENESIS EXECUTION LAYER")
        task.assigned_to = "genesis_execution_layer"

        try:
            # Build context from task metadata
            context = ""
            if task.context_files:
                context = f"Context files: {', '.join(task.context_files)}"
            if task.success_criteria:
                context += f"\nSuccess criteria: {', '.join(task.success_criteria)}"

            # Execute via Genesis Execution Layer (RWL swarm + rate maximizer)
            result = await self.execution_layer.execute_task(
                task=f"{task.title}\n\n{task.description}",
                context=context
            )

            if result.success:
                # Extract output path from task if specified
                output_path = self._extract_output_path(task)
                if output_path and result.results:
                    # Try to save code from results; first non-empty result wins
                    for r in result.results:
                        if r.get("result"):
                            saved = self.gemini.save_code(r["result"], output_path)
                            if saved:
                                logger.info(f"Saved code to {output_path}")
                                break

                summary = (
                    f"Genesis Execution Layer completed. "
                    f"Mode: {result.mode.value} | "
                    f"Stories: {result.stories_completed}/{result.stories_completed + result.stories_failed} | "
                    f"Tokens: {result.total_tokens} | "
                    f"Cost: ${result.total_cost:.4f}"
                )
                self.queue.complete(task, summary)
                logger.info(f"✅ Task {task.id} completed via Genesis Execution Layer | {summary}")
            else:
                error_msg = "; ".join(result.errors) if result.errors else "Execution failed"
                self.queue.fail(task, f"Genesis Execution Layer: {error_msg}")
                logger.warning(f"❌ Task {task.id} failed: {error_msg}")

        except Exception as e:
            # Any execution-layer failure marks the task failed; the loop goes on.
            logger.error(f"Genesis Execution Layer error: {e}")
            self.queue.fail(task, str(e))

    def _is_complex_task(self, task: Task) -> bool:
        """Determine if task requires Claude Code (complex) or can use Gemini (simple).

        Complex tasks:
        - Multi-file modifications
        - Require existing code context
        - Integration testing
        - Architecture decisions

        NOTE(review): not called from _loop_iteration since the execution
        layer was hardwired — confirm whether this routing path is still used.
        """
        # Honor explicit complexity markers FIRST
        if task.complexity == "simple":
            return False  # Explicitly simple -> use Gemini
        if task.complexity == "complex":
            return True   # Explicitly complex -> use Claude

        desc_lower = task.description.lower()
        title_lower = task.title.lower()

        # Keywords indicating complex tasks
        complex_keywords = [
            "refactor", "multi-file", "architecture",
            "test suite", "debug", "fix bug", "investigate",
            "review", "analyze codebase", "existing code", "modify all"
        ]

        for keyword in complex_keywords:
            if keyword in desc_lower or keyword in title_lower:
                return True

        # If task references multiple files
        if task.context_files and len(task.context_files) > 2:
            return True

        return False

    def _extract_output_path(self, task: Task) -> Optional[str]:
        """Extract output file path from task description, or None if absent."""
        import re
        desc = task.description

        # Look for explicit file paths
        match = re.search(r'(/mnt/e/genesis-system/[^\s]+\.py)', desc)
        if match:
            return match.group(1)

        # Look for "create X at Y" patterns
        match = re.search(r'(?:create|build|write)\s+.*?(?:at|to)\s+([^\s]+\.py)', desc, re.I)
        if match:
            return f"/mnt/e/genesis-system/{match.group(1)}"

        return None

    def stop(self):
        """Stop the orchestrator gracefully (loop exits within one interval)."""
        logger.info("Shutting down AIVA Orchestrator...")
        self.running = False

    def add_task(self, title: str, description: str, priority: int = 3, **kwargs) -> str:
        """Add a task to the queue and return its generated id."""
        task = Task(
            id="",
            title=title,
            description=description,
            priority=priority,
            **kwargs
        )
        return self.queue.enqueue(task)

    def get_status(self) -> Dict:
        """Get orchestrator status: uptime, queue counts, terminals, heartbeat."""
        uptime = datetime.now() - self.start_time
        return {
            "status": "running" if self.running else "stopped",
            "uptime": str(uptime),
            "queue_stats": self.queue.get_stats(),
            "terminals": {t: self.claude.get_terminal_status(t) for t in self.claude.terminals},
            "last_heartbeat": self.watchdog.get_last_heartbeat(),
        }


# CLI Interface
def main():
    """CLI entry point: start / status / add / queue / stop subcommands."""
    import argparse

    parser = argparse.ArgumentParser(description="AIVA 24/7 Orchestrator")
    parser.add_argument("command", choices=["start", "status", "add", "queue", "stop"],
                       help="Command to run")
    parser.add_argument("--title", help="Task title (for add command)")
    parser.add_argument("--description", help="Task description (for add command)")
    parser.add_argument("--priority", type=int, default=3, help="Task priority 1-5")

    args = parser.parse_args()

    # Secrets already loaded at module import time
    orchestrator = AIVAOrchestrator()

    if args.command == "start":
        # Graceful shutdown: the handler only flips the running flag, so the
        # loop exits within one loop_interval of the signal arriving.
        def signal_handler(sig, frame):
            orchestrator.stop()
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)

        # Run
        asyncio.run(orchestrator.start())

    elif args.command == "status":
        status = orchestrator.get_status()
        print(json.dumps(status, indent=2))

    elif args.command == "add":
        if not args.title or not args.description:
            print("Error: --title and --description required")
            sys.exit(1)
        task_id = orchestrator.add_task(args.title, args.description, args.priority)
        print(f"Added task: {task_id}")

    elif args.command == "queue":
        stats = orchestrator.queue.get_stats()
        print("Queue Statistics:")  # was an f-string with nothing to interpolate
        print(f"  Pending:   {stats['pending']}")
        print(f"  Active:    {stats['active']}")
        print(f"  Completed: {stats['completed']}")
        print(f"  Failed:    {stats['failed']}")

    elif args.command == "stop":
        # Send stop signal via Redis pub/sub.
        # NOTE(review): start() never subscribes to "genesis:control", so this
        # message appears unconsumed within this file — confirm an external
        # listener exists, otherwise "stop" is effectively a no-op.
        orchestrator.queue.redis.publish("genesis:control", "stop")
        print("Stop signal sent")


if __name__ == "__main__":
    main()
