#!/usr/bin/env python3
"""
AIVA Bridge - Genesis to AIVA Connection Layer
===============================================
Connects Genesis Kernel to AIVA (QwenLong 30.5B MoE on Elestio).

Provides:
- Direct reasoning calls to AIVA's Ollama endpoint
- Redis pub/sub integration for event-driven communication
- Health monitoring and automatic reconnection
- Context-aware prompt engineering

Usage:
    from aiva_bridge import AIVABridge

    bridge = AIVABridge()
    response = await bridge.reason("Analyze this code for bugs")

    # Or with context
    response = await bridge.reason(
        prompt="What should Genesis do next?",
        context={"task": "memory consolidation", "priority": "high"}
    )
"""

import os
import json
import asyncio
import logging
from datetime import datetime
from typing import Optional, Dict, Any, List
from pathlib import Path

try:
    import httpx
except ImportError:
    httpx = None

# Fallback for when httpx isn't available
import urllib.request
import urllib.error

try:
    import redis.asyncio as aioredis
except ImportError:
    aioredis = None

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("AIVABridge")


def load_env_file(path: Path) -> None:
    """Load KEY=VALUE pairs from *path* into os.environ.

    Prefers python-dotenv when installed. Otherwise falls back to a minimal
    parser that skips blank/comment lines and strips one pair of matching
    surrounding quotes, so the fallback behaves like dotenv for common files.
    Existing environment variables are never overwritten (setdefault).
    """
    try:
        from dotenv import load_dotenv
        load_dotenv(path)
        return
    except ImportError:
        pass
    # Manual env loading fallback
    with open(path) as f:
        for raw in f:
            line = raw.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = line.split('=', 1)
            value = value.strip()
            # Strip one pair of matching quotes, mirroring dotenv's behavior.
            if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                value = value[1:-1]
            os.environ.setdefault(key.strip(), value)


# Load environment
ENV_PATH = Path(__file__).parent.parent / "AIVA" / "aiva_config.env"
if ENV_PATH.exists():
    load_env_file(ENV_PATH)


class AIVABridge:
    """
    Bridge between Genesis Kernel and AIVA reasoning engine.

    Talks to AIVA's Ollama endpoint over HTTP (httpx when available, urllib
    otherwise) and publishes events to the Genesis Redis bus. HTTP and Redis
    clients are created lazily and reused.
    """

    # Default Elestio endpoints.
    # NOTE(review): DEFAULT_REDIS_URL embeds a credential in source control.
    # Prefer supplying REDIS_URL via the environment and rotating this secret.
    DEFAULT_OLLAMA_URL = "http://152.53.201.152:23405/api/generate"
    DEFAULT_REDIS_URL = "redis://:e2ZyYYr4oWRdASI2CaLc-@redis-genesis-u50607.vm.elestio.app:26379"

    def __init__(self):
        # Environment overrides fall back to the Elestio defaults above.
        self.ollama_url = os.getenv("OLLAMA_URL", self.DEFAULT_OLLAMA_URL)
        self.redis_url = os.getenv("REDIS_URL", self.DEFAULT_REDIS_URL)
        self.model = os.getenv("AIVA_MODEL", "qwen-long")
        self.timeout = float(os.getenv("AIVA_TIMEOUT", "300"))  # 5 min default
        self.is_connected = False   # updated by health_check()
        self._redis_client = None   # lazily created async Redis client
        self._http_client = None    # lazily created httpx.AsyncClient

        logger.info(f"AIVABridge initialized | Model: {self.model} | Endpoint: {self.ollama_url.split('/api')[0]}")

    @staticmethod
    def _urllib_get_json(url: str, timeout: float) -> Dict[str, Any]:
        """Blocking GET returning parsed JSON (urllib fallback).

        Must be run via an executor from async code so the event loop
        is not blocked by network I/O.
        """
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read().decode())

    @staticmethod
    def _urllib_post_json(url: str, payload: Dict[str, Any], timeout: float) -> Dict[str, Any]:
        """Blocking JSON POST returning parsed JSON (urllib fallback).

        Must be run via an executor from async code so the event loop
        is not blocked by network I/O.
        """
        req = urllib.request.Request(
            url,
            data=json.dumps(payload).encode('utf-8'),
            headers={'Content-Type': 'application/json'},
            method='POST'
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read().decode())

    async def _get_http_client(self) -> "httpx.AsyncClient":
        """Get or create the shared HTTP client.

        Raises:
            ImportError: if httpx is not installed.
        """
        if httpx is None:
            raise ImportError("httpx required: pip install httpx")
        if self._http_client is None or self._http_client.is_closed:
            self._http_client = httpx.AsyncClient(timeout=self.timeout)
        return self._http_client

    async def _get_redis_client(self):
        """Get or create the shared async Redis client.

        Raises:
            ImportError: if redis is not installed.
        """
        if aioredis is None:
            raise ImportError("redis required: pip install redis")
        if self._redis_client is None:
            self._redis_client = aioredis.from_url(self.redis_url)
        return self._redis_client

    async def health_check(self) -> Dict[str, Any]:
        """Check AIVA/Ollama health status.

        Queries the Ollama /api/tags endpoint, records whether the target
        model is present, and updates self.is_connected accordingly.

        Returns:
            Status dict: 'status', 'models'/'error', 'endpoint', 'timestamp'.
            Never raises — failures are reported as status 'offline'.
        """
        tags_url = self.ollama_url.replace("/api/generate", "/api/tags")
        try:
            if httpx:
                client = await self._get_http_client()
                response = await client.get(tags_url, timeout=10.0)
                response.raise_for_status()
                data = response.json()
            else:
                # Fallback to urllib, offloaded so the event loop stays responsive.
                loop = asyncio.get_running_loop()
                data = await loop.run_in_executor(
                    None, self._urllib_get_json, tags_url, 10.0
                )

            models = [m.get("name") for m in data.get("models", [])]
            # Ollama may report the model with or without the ":latest" tag.
            qwen_ready = self.model in models or f"{self.model}:latest" in models

            self.is_connected = qwen_ready
            return {
                "status": "online" if qwen_ready else "model_missing",
                "models": models,
                "target_model": self.model,
                "model_ready": qwen_ready,
                "endpoint": self.ollama_url.split("/api")[0],
                "timestamp": datetime.now().isoformat()
            }
        except Exception as e:
            self.is_connected = False
            return {
                "status": "offline",
                "error": str(e),
                "endpoint": self.ollama_url.split("/api")[0],
                "timestamp": datetime.now().isoformat()
            }

    async def reason(
        self,
        prompt: str,
        context: Optional[Dict[str, Any]] = None,
        system_prompt: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
        strip_thinking: bool = False
    ) -> Dict[str, Any]:
        """
        Send reasoning request to AIVA.

        Args:
            prompt: The question or task for AIVA
            context: Optional context dict to include
            system_prompt: Override default system prompt
            temperature: Sampling temperature (0.0-1.0)
            max_tokens: Maximum tokens to generate
            strip_thinking: Remove <think> blocks from response

        Returns:
            Dict with 'response', 'tokens', 'duration', 'success'.
            Never raises — failures return success=False with 'error'.
        """
        try:
            # Build full prompt with context embedded as a JSON block.
            full_prompt = prompt
            if context:
                context_str = json.dumps(context, indent=2)
                full_prompt = f"Context:\n```json\n{context_str}\n```\n\nTask: {prompt}"

            payload = {
                "model": self.model,
                "prompt": full_prompt,
                "stream": False,
                "options": {
                    "num_ctx": 64000,
                    "temperature": temperature,
                    "num_predict": max_tokens,
                    "top_p": 0.8,
                    "top_k": 20,
                    "repeat_penalty": 1.05
                }
            }

            if system_prompt:
                payload["system"] = system_prompt

            logger.info(f"AIVA reasoning request: {prompt[:80]}...")
            start_time = datetime.now()

            if httpx:
                client = await self._get_http_client()
                response = await client.post(self.ollama_url, json=payload)
                response.raise_for_status()
                data = response.json()
            else:
                # Fallback to urllib, offloaded so the event loop stays
                # responsive during what may be minutes of generation.
                loop = asyncio.get_running_loop()
                data = await loop.run_in_executor(
                    None, self._urllib_post_json, self.ollama_url, payload, self.timeout
                )

            duration = (datetime.now() - start_time).total_seconds()
            response_text = data.get("response", "")

            # Optionally strip chain-of-thought <think>...</think> blocks.
            if strip_thinking and "<think>" in response_text:
                import re
                response_text = re.sub(r'<think>.*?</think>\s*', '', response_text, flags=re.DOTALL)

            tokens = data.get("eval_count", 0)
            logger.info(f"AIVA response: {tokens} tokens in {duration:.1f}s")

            return {
                "success": True,
                "response": response_text.strip(),
                "tokens": tokens,
                "duration": duration,
                "model": self.model,
                # Report thinking based on the raw response, even when stripped.
                "thinking": "<think>" in data.get("response", "")
            }

        except Exception as e:
            logger.error(f"AIVA reasoning error: {e}")
            return {
                "success": False,
                "response": None,
                "error": str(e),
                "tokens": 0,
                "duration": 0
            }

    async def publish_event(self, channel: str, event: Dict[str, Any]) -> bool:
        """Publish event to Genesis nervous system via Redis.

        Stamps the event with 'timestamp' and 'source' (mutates the dict)
        before publishing. Returns True on success, False on any error.
        """
        try:
            redis = await self._get_redis_client()
            event["timestamp"] = datetime.now().isoformat()
            event["source"] = "genesis_kernel"
            await redis.publish(channel, json.dumps(event))
            logger.info(f"Published to {channel}: {event.get('type', 'unknown')}")
            return True
        except Exception as e:
            logger.error(f"Redis publish error: {e}")
            return False

    async def request_validation(self, content: str, worker_id: str = "kernel") -> Dict[str, Any]:
        """Request AIVA validation via Redis (triggers daemon if running).

        Publishes a validation_request event, then also performs a direct
        reasoning call so the caller gets an immediate result.
        """
        event = {
            "type": "validation_request",
            "worker_id": worker_id,
            "output": content,
            "requested_at": datetime.now().isoformat()
        }
        await self.publish_event("genesis:nervous_system", event)

        # Also do direct reasoning for immediate response
        return await self.reason(
            prompt=f"Validate this output for correctness and completeness:\n\n{content}",
            context={"worker_id": worker_id, "validation_type": "quality_check"}
        )

    async def close(self):
        """Clean up HTTP and Redis connections."""
        if self._http_client and not self._http_client.is_closed:
            await self._http_client.aclose()
        if self._redis_client:
            # redis-py >= 5 renamed close() to aclose(); support both.
            closer = getattr(self._redis_client, "aclose", None) or self._redis_client.close
            await closer()
        logger.info("AIVABridge connections closed")


# Synchronous wrapper for non-async code
class AIVABridgeSync:
    """Synchronous wrapper for AIVABridge.

    Owns a private event loop and drives the async bridge with
    run_until_complete. Not usable from inside an already-running
    event loop — use AIVABridge directly with await there.
    """

    def __init__(self):
        self._bridge = AIVABridge()
        self._loop = None  # private loop, created on first use

    def _get_loop(self):
        """Return the cached event loop, creating a private one when needed.

        asyncio.get_event_loop() is deprecated for implicit loop creation
        (and returns the running loop inside async code, where
        run_until_complete would then fail), so we manage our own loop.

        Raises:
            RuntimeError: if called while an event loop is already running.
        """
        if self._loop is None or self._loop.is_closed():
            try:
                asyncio.get_running_loop()
            except RuntimeError:
                pass  # no running loop — safe to create our own
            else:
                raise RuntimeError(
                    "AIVABridgeSync cannot be used inside a running event loop; "
                    "use AIVABridge directly with await."
                )
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)
        return self._loop

    def health_check(self) -> Dict[str, Any]:
        """Blocking wrapper around AIVABridge.health_check()."""
        return self._get_loop().run_until_complete(self._bridge.health_check())

    def reason(self, prompt: str, **kwargs) -> Dict[str, Any]:
        """Blocking wrapper around AIVABridge.reason()."""
        return self._get_loop().run_until_complete(self._bridge.reason(prompt, **kwargs))

    def close(self):
        """Blocking wrapper around AIVABridge.close()."""
        self._get_loop().run_until_complete(self._bridge.close())


# Quick test
# Quick smoke test when executed directly
if __name__ == "__main__":
    async def _main():
        bridge = AIVABridge()

        print("=== AIVA Health Check ===")
        status = await bridge.health_check()
        print(json.dumps(status, indent=2))

        if status.get("model_ready"):
            print("\n=== AIVA Reasoning Test ===")
            answer = await bridge.reason(
                "What is 2 + 2? Answer in one word.",
                temperature=0.1
            )
            print(f"Response: {answer.get('response')}")
            print(f"Tokens: {answer.get('tokens')}, Duration: {answer.get('duration'):.1f}s")

        await bridge.close()

    asyncio.run(_main())
