#!/usr/bin/env python3
"""
GENESIS PERSISTENT WORKER LOOP
================================
Always-on daemon that survives Claude Code session restarts.

Purpose:
    When a Claude session ends, all 45 in-session agents die.
    This process keeps running in the background, polling tasks.json
    every 30 seconds and dispatching pending tasks to the Gemini swarm.

Architecture:
    - Reads tasks from loop/tasks.json
    - Dispatches pending stories to genesis_execution_layer
    - Marks completed stories with passes=True + completed_at timestamp
    - Logs results to PostgreSQL genesis_tasks table
    - Logs human-readable output to logs/worker_loop.log
    - Handles SIGTERM/SIGINT gracefully (flushes state before exit)

Usage:
    python core/persistent_worker_loop.py            # foreground
    nohup python core/persistent_worker_loop.py &    # background daemon

Environment:
    Loads E:/genesis-system/.env automatically.
    Requires GEMINI_API_KEY (and optionally PostgreSQL via elestio_config).
"""

import asyncio
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

# ── Path bootstrap ────────────────────────────────────────────────────────────
# Repo root = parent of core/. Prepending it to sys.path lets `core.*` and
# other project-absolute imports resolve regardless of the CWD the daemon
# was launched from (foreground, nohup, or a service manager).
GENESIS_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(GENESIS_ROOT))

# ── Load .env before any other imports that need API keys ────────────────────
def _load_env() -> None:
    """Populate os.environ from E:/genesis-system/.env (tiny built-in parser).

    Only simple ``KEY=VALUE`` lines are honoured; blanks, comments and
    malformed lines are skipped. Variables already present in the
    environment are never overwritten.
    """
    env_path = GENESIS_ROOT / ".env"
    if not env_path.exists():
        return
    for raw_line in env_path.read_text(encoding="utf-8").splitlines():
        entry = raw_line.strip()
        # Ignore blanks, comment lines, and anything without a '=' at all.
        if not entry or entry.startswith("#") or "=" not in entry:
            continue
        name, _, rhs = entry.partition("=")
        name = name.strip()
        # Drop surrounding single/double quotes often found in .env files.
        rhs = rhs.strip().strip('"').strip("'")
        # Real environment variables win over .env-provided values.
        if name and name not in os.environ:
            os.environ[name] = rhs

_load_env()

# ── Logging setup ─────────────────────────────────────────────────────────────
LOGS_DIR = GENESIS_ROOT / "logs"
LOGS_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOGS_DIR / "worker_loop.log"

# Every record goes to BOTH the log file (tail -f friendly) and stdout
# (captured by nohup / the service manager).
_log_handlers = [
    logging.FileHandler(str(LOG_FILE), encoding="utf-8"),
    logging.StreamHandler(sys.stdout),
]
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    datefmt="%Y-%m-%dT%H:%M:%S",
    handlers=_log_handlers,
)
log = logging.getLogger("genesis.worker_loop")

# ── Constants ─────────────────────────────────────────────────────────────────
TASKS_PATH = GENESIS_ROOT / "loop" / "tasks.json"  # task queue this loop consumes
POLL_INTERVAL_SECONDS = 30  # delay between poll cycles (checked 1s at a time)
PID_FILE = GENESIS_ROOT / "data" / "worker_loop.pid"  # liveness marker for external tools
LAST_TASK_FILE = GENESIS_ROOT / "data" / "worker_loop_last_task.json"  # last-outcome snapshot


# ── Graceful shutdown ─────────────────────────────────────────────────────────
# Cooperative shutdown flag: set by the signal handler, polled by the main
# loop (between tasks and once per sleep-second), so the current task is
# allowed to finish before the process exits.
_shutdown_requested = False

def _handle_signal(signum: int, frame: Any) -> None:
    """Signal handler: request a graceful shutdown instead of dying mid-task."""
    global _shutdown_requested
    sig_name = signal.Signals(signum).name
    log.info(f"Received {sig_name} — requesting graceful shutdown after current task completes.")
    _shutdown_requested = True

# Both `kill` (SIGTERM) and Ctrl+C (SIGINT) take the same graceful path.
signal.signal(signal.SIGTERM, _handle_signal)
signal.signal(signal.SIGINT, _handle_signal)


# ── PID management ────────────────────────────────────────────────────────────
def _write_pid() -> None:
    """Record this process's PID so external tooling can find/stop the daemon."""
    PID_FILE.parent.mkdir(parents=True, exist_ok=True)
    pid = os.getpid()
    PID_FILE.write_text(str(pid), encoding="utf-8")
    log.info(f"PID {pid} written to {PID_FILE}")

def _remove_pid() -> None:
    """Best-effort removal of the PID file at shutdown; never raises."""
    try:
        PID_FILE.unlink(missing_ok=True)
    except Exception:
        # e.g. permission errors — deliberately swallowed: a stale PID file
        # must not turn a clean shutdown into a crash.
        pass


# ── tasks.json helpers ────────────────────────────────────────────────────────
def _load_tasks() -> Dict[str, Any]:
    """Load tasks.json, never raising.

    Returns:
        The parsed JSON dict, or an empty ``{"stories": []}`` structure when
        the file is missing, malformed, or unreadable — the daemon must keep
        polling rather than crash on a bad tasks file.
    """
    if not TASKS_PATH.exists():
        log.warning(f"tasks.json not found at {TASKS_PATH}")
        return {"stories": []}
    try:
        with open(TASKS_PATH, encoding="utf-8") as fh:
            return json.load(fh)
    except (json.JSONDecodeError, OSError) as exc:
        # OSError covers transient read failures (permissions, or the file
        # being swapped out mid-read by another writer, notably on Windows).
        log.error(f"tasks.json is malformed: {exc}")
        return {"stories": []}

def _save_tasks(data: Dict[str, Any]) -> None:
    """Persist *data* to tasks.json atomically.

    The payload is first written to a sibling ``.tmp`` file and then renamed
    over the real file, so concurrent readers never see a half-written JSON.
    """
    scratch = TASKS_PATH.with_suffix(".tmp")
    with open(scratch, "w", encoding="utf-8") as out:
        json.dump(data, out, indent=2)
    scratch.replace(TASKS_PATH)

def _pending_stories(data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Return stories where passes is False/missing and status != 'blocked'."""
    stories = data.get("stories", [])
    return [
        s for s in stories
        if not s.get("passes", False)
        and s.get("status", "pending") not in ("blocked", "in_progress")
    ]

def _mark_story_in_progress(data: Dict[str, Any], story_id: str) -> None:
    """Flag the story matching *story_id* as 'in_progress', then persist."""
    for candidate in data.get("stories", []):
        if candidate.get("id") == story_id:
            candidate["status"] = "in_progress"
            break  # ids are treated as unique; first match wins
    _save_tasks(data)

def _mark_story_complete(
    data: Dict[str, Any],
    story_id: str,
    result_summary: str,
) -> None:
    """Mark a story as passed/completed, refresh the counters, and persist.

    The stored result summary is capped at 500 chars to keep tasks.json small.
    """
    now = datetime.now(timezone.utc).isoformat()
    for entry in data.get("stories", []):
        if entry.get("id") != story_id:
            continue
        entry["passes"] = True
        entry["status"] = "completed"
        entry["completed_at"] = now
        entry["worker_result"] = result_summary[:500]
        break
    # Refresh the aggregate counters consumed by dashboards/monitors.
    stories = data.get("stories", [])
    done = sum(1 for entry in stories if entry.get("passes", False))
    data["completed_count"] = done
    data["pending_count"] = len(stories) - done
    data["updated_at"] = now
    _save_tasks(data)

def _mark_story_failed(
    data: Dict[str, Any],
    story_id: str,
    error: str,
) -> None:
    """Record a dispatch failure on the story (status='failed'), then persist."""
    stamp = datetime.now(timezone.utc).isoformat()
    for entry in data.get("stories", []):
        if entry.get("id") == story_id:
            entry["status"] = "failed"
            entry["worker_error"] = error[:500]  # cap stored error text
            entry["last_attempt"] = stamp
            break
    _save_tasks(data)


# ── Last-task tracking ────────────────────────────────────────────────────────
def _record_last_task(story: Dict[str, Any], outcome: str) -> None:
    """Snapshot the most recently processed story to a small JSON status file."""
    snapshot = {
        "story_id": story.get("id"),
        "title": story.get("title"),
        "outcome": outcome,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "pid": os.getpid(),
    }
    LAST_TASK_FILE.parent.mkdir(parents=True, exist_ok=True)
    LAST_TASK_FILE.write_text(json.dumps(snapshot, indent=2), encoding="utf-8")


# ── PostgreSQL logging (optional — skips gracefully if unavailable) ───────────
def _log_to_postgres(story: Dict[str, Any], outcome: str, result: str) -> None:
    """
    Write task outcome to genesis_tasks table in Elestio PostgreSQL.
    Silently skips if the connection or table is unavailable.

    Args:
        story:   The story dict (id/title are recorded).
        outcome: 'completed' or 'failed'.
        result:  Result or error text (truncated to 1000 chars).
    """
    try:
        # Guard against prepending the same path every call — this runs in a
        # long-lived daemon and would otherwise grow sys.path unboundedly.
        memory_path = str(GENESIS_ROOT / "data" / "genesis-memory")
        if memory_path not in sys.path:
            sys.path.insert(0, memory_path)
        from elestio_config import PostgresConfig  # type: ignore
        import psycopg2  # type: ignore

        conn = psycopg2.connect(**PostgresConfig.get_connection_params())
        try:
            conn.autocommit = True
            # Cursor as a context manager so it is closed even if a
            # statement raises; the finally below closes the connection.
            with conn.cursor() as cur:
                # Create table if it doesn't exist yet
                cur.execute("""
                    CREATE TABLE IF NOT EXISTS genesis_tasks (
                        id SERIAL PRIMARY KEY,
                        story_id TEXT NOT NULL,
                        title TEXT,
                        outcome TEXT,
                        result_summary TEXT,
                        processed_at TIMESTAMPTZ DEFAULT NOW(),
                        worker_pid INTEGER
                    )
                """)

                cur.execute(
                    """
                    INSERT INTO genesis_tasks
                        (story_id, title, outcome, result_summary, worker_pid)
                    VALUES (%s, %s, %s, %s, %s)
                    """,
                    (
                        story.get("id", "unknown"),
                        story.get("title", "")[:255],
                        outcome,
                        result[:1000],
                        os.getpid(),
                    ),
                )
        finally:
            # Always release the connection, even when a SQL statement fails.
            conn.close()
        log.debug(f"PostgreSQL log written for story {story.get('id')}")
    except Exception as exc:
        # Best-effort by design: PostgreSQL is optional for this daemon.
        log.debug(f"PostgreSQL logging skipped ({type(exc).__name__}: {exc})")


# ── Task dispatcher ───────────────────────────────────────────────────────────
async def _dispatch_story(story: Dict[str, Any]) -> tuple[bool, str]:
    """
    Send a single story to the Genesis Execution Layer.

    Tries three dispatch paths, most capable first:
        1. core.genesis_execution_layer.execute_task_sync (full swarm)
        2. core.gemini_executor.GeminiExecutor (direct executor)
        3. raw google.generativeai model call (last resort)

    Args:
        story: Story dict from tasks.json; reads id, title,
            acceptance_criteria, and notes.

    Returns:
        (success, result_summary) — the summary is truncated to 800 chars.
    """
    story_id = story.get("id", "?")
    title = story.get("title", "untitled")

    # Build prompt from story fields. Criteria entries are normally dicts
    # with a "description" key, but tolerate plain strings too so a
    # hand-edited tasks.json cannot crash the dispatcher.
    criteria = story.get("acceptance_criteria", [])
    criteria_text = "\n".join(
        f"  - {c.get('description', '') if isinstance(c, dict) else c}"
        for c in criteria
    )
    notes = story.get("notes", "")
    prompt = (
        f"Execute Genesis story {story_id}: {title}\n\n"
        f"Acceptance criteria:\n{criteria_text}\n\n"
        f"Notes: {notes}\n\n"
        f"Complete the story. Return a brief summary of what was done."
    )

    # Try full execution layer first
    try:
        from core.genesis_execution_layer import execute_task_sync  # type: ignore
        log.info(f"[{story_id}] Dispatching via GenesisExecutionLayer...")
        # execute_task_sync is blocking — run it in the default thread pool.
        # get_running_loop() replaces get_event_loop(), which is deprecated
        # inside coroutines since Python 3.10; we are guaranteed to be on a
        # running loop here.
        result = await asyncio.get_running_loop().run_in_executor(
            None, execute_task_sync, prompt
        )
        summary = str(result)[:800] if result else "Completed (no output)"
        return True, summary
    except Exception as exc:
        log.warning(f"[{story_id}] ExecutionLayer unavailable ({exc}), trying GeminiExecutor...")

    # Fallback: direct GeminiExecutor
    try:
        from core.gemini_executor import GeminiExecutor  # type: ignore
        executor = GeminiExecutor()
        result = await executor.execute(prompt)
        summary = str(result)[:800] if result else "Completed via direct Gemini"
        return True, summary
    except Exception as exc:
        log.warning(f"[{story_id}] GeminiExecutor unavailable ({exc}), trying google.generativeai...")

    # Last resort: raw google-generativeai
    try:
        import google.generativeai as genai  # type: ignore
        genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))
        model = genai.GenerativeModel("gemini-2.0-flash")
        response = model.generate_content(prompt)
        summary = (response.text or "")[:800]
        return True, summary
    except Exception as exc:
        error_msg = f"All dispatch paths failed: {exc}"
        log.error(f"[{story_id}] {error_msg}")
        return False, error_msg


# ── Main worker loop ──────────────────────────────────────────────────────────
async def run_worker_loop() -> None:
    """Main daemon loop. Polls tasks.json and dispatches pending stories.

    Lifecycle:
        * Writes a PID file at startup; removes it in the ``finally`` block.
        * Each cycle reloads tasks.json and dispatches AT MOST ONE story.
        * Sleeps POLL_INTERVAL_SECONDS between cycles in 1-second slices so a
          SIGTERM/SIGINT is honoured within about a second.
        * Outcomes are persisted to tasks.json, the last-task snapshot file,
          and (best-effort) PostgreSQL.
    """
    log.info("=" * 60)
    log.info("GENESIS PERSISTENT WORKER LOOP — STARTING")
    log.info(f"PID: {os.getpid()}")
    log.info(f"Tasks file: {TASKS_PATH}")
    log.info(f"Poll interval: {POLL_INTERVAL_SECONDS}s")
    log.info(f"Log file: {LOG_FILE}")
    log.info("=" * 60)

    _write_pid()
    cycle = 0

    try:
        while not _shutdown_requested:
            cycle += 1
            log.info(f"--- Cycle {cycle} ---")

            data = _load_tasks()
            pending = _pending_stories(data)

            if not pending:
                log.info(f"No pending stories. Sleeping {POLL_INTERVAL_SECONDS}s...")
            else:
                log.info(f"Found {len(pending)} pending story/stories.")

                # Process one story per cycle to avoid hammering the API
                story = pending[0]
                story_id = story.get("id", "?")
                title = story.get("title", "untitled")

                log.info(f"Processing [{story_id}]: {title}")
                # Persist 'in_progress' immediately so this story is excluded
                # from _pending_stories() while dispatch is underway.
                _mark_story_in_progress(data, story_id)

                try:
                    success, result = await _dispatch_story(story)
                except Exception as exc:
                    # _dispatch_story handles its own failures; this guard
                    # keeps the daemon alive on truly unexpected errors.
                    success = False
                    result = f"Unhandled dispatch exception: {exc}"

                if success:
                    log.info(f"[{story_id}] COMPLETE. Summary: {result[:200]}")
                    # Reload data (may have changed during dispatch)
                    data = _load_tasks()
                    _mark_story_complete(data, story_id, result)
                    _record_last_task(story, "completed")
                    _log_to_postgres(story, "completed", result)
                else:
                    log.error(f"[{story_id}] FAILED. Error: {result[:200]}")
                    data = _load_tasks()
                    _mark_story_failed(data, story_id, result)
                    _record_last_task(story, "failed")
                    _log_to_postgres(story, "failed", result)

            # Wait before next cycle, checking shutdown flag every second
            # (1 s granularity keeps shutdown latency low without busy-waiting)
            for _ in range(POLL_INTERVAL_SECONDS):
                if _shutdown_requested:
                    break
                await asyncio.sleep(1)

    finally:
        # Runs on graceful shutdown AND on unexpected exceptions, so a stale
        # PID file never outlives the process.
        log.info("Worker loop shutting down — flushing state...")
        _remove_pid()
        log.info("Genesis worker loop STOPPED.")


# ── Entry point ───────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Blocks until a SIGTERM/SIGINT flips the shutdown flag (see _handle_signal).
    asyncio.run(run_worker_loop())
