# integration_hub.py
import os
import asyncio
import logging
from datetime import datetime
from typing import Optional, Dict, Any, List, Tuple
from pathlib import Path
import json
import time
from functools import wraps

# Optional dependencies (install if needed)
try:
    import httpx
except ImportError:
    httpx = None

try:
    import redis.asyncio as aioredis
except ImportError:
    aioredis = None

try:
    import asyncpg
except ImportError:
    asyncpg = None

try:
    from qdrant_client import QdrantClient, models
    from qdrant_client.models import VectorParams, Distance
except ImportError:
    QdrantClient = None
    models = None
    VectorParams = None
    Distance = None

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("IntegrationHub")

# Rate limiting decorator
def rate_limit(calls: int, period: int):
    """
    Decorate an async function so it runs at most `calls` times per sliding
    `period`-second window.

    Excess calls are not dropped: they sleep until the oldest call in the
    current window ages out, then proceed.

    Args:
        calls: The maximum number of calls allowed within the period (>= 1).
        period: The duration of the period in seconds.

    Raises:
        ValueError: If `calls` is less than 1 (a zero-call limit would
            block forever).
    """
    if calls < 1:
        raise ValueError("rate_limit requires calls >= 1")

    def decorator(func):
        # Monotonic timestamps of calls made inside the current window.
        timestamps: List[float] = []
        # Serializes bookkeeping only; the wrapped call runs outside the lock
        # so concurrent invocations of `func` are still possible.
        lock = asyncio.Lock()

        @wraps(func)
        async def _rate_limited(*args, **kwargs):
            while True:
                async with lock:
                    now = time.monotonic()
                    # Evict timestamps that have aged out of the window.
                    while timestamps and now - timestamps[0] >= period:
                        timestamps.pop(0)
                    if len(timestamps) < calls:
                        timestamps.append(now)
                        break
                    # Window is full: wait until the oldest entry expires.
                    wait = period - (now - timestamps[0])
                # Sleep with the lock released so other coroutines make progress.
                await asyncio.sleep(wait)
            return await func(*args, **kwargs)

        return _rate_limited

    return decorator


class IntegrationHub:
    """
    Unified integration hub for AIVA services.
    """

    DEFAULT_OLLAMA_URL = "http://152.53.201.152:23405/api/generate"  # Replace with your Ollama endpoint
    DEFAULT_REDIS_URL = "redis://:e2ZyYYr4oWRdASI2CaLc-@redis-genesis-u50607.vm.elestio.app:26379"  # Replace with your Redis URL
    DEFAULT_POSTGRES_URL = "postgresql://user:password@host:port/database" # Replace with your Postgres URL
    DEFAULT_QDRANT_URL = "http://localhost:6333" # Replace with your Qdrant URL

    def __init__(self):
        """Read service endpoints and model settings from the environment.

        Each setting falls back to the class-level DEFAULT_* value when the
        corresponding environment variable is unset. No connections are made
        here — clients and pools are created lazily by the _get_* helpers.
        """
        env = os.getenv
        self.ollama_url = env("OLLAMA_URL", self.DEFAULT_OLLAMA_URL)
        self.redis_url = env("REDIS_URL", self.DEFAULT_REDIS_URL)
        self.postgres_url = env("POSTGRES_URL", self.DEFAULT_POSTGRES_URL)
        self.qdrant_url = env("QDRANT_URL", self.DEFAULT_QDRANT_URL)

        # Generation parameters for the Ollama model.
        self.model = env("AIVA_MODEL", "qwen-long")
        self.timeout = float(env("AIVA_TIMEOUT", "300"))  # seconds (5 min default)
        self.max_tokens = int(env("AIVA_MAX_TOKENS", "4096"))
        self.temperature = float(env("AIVA_TEMPERATURE", "0.7"))

        # Lazily-created clients/pools; each stays None until first use.
        self._redis_pool = None
        self._postgres_pool = None
        self._qdrant_client = None
        self._http_client = None
        self.is_connected = False  # overall status, updated by health_check()

        logger.info("IntegrationHub initialized.")


    async def _get_http_client(self) -> "httpx.AsyncClient":
        """Return the shared HTTP client, (re)creating it if missing or closed.

        Raises:
            ImportError: If the optional httpx dependency is not installed.
        """
        if httpx is None:
            raise ImportError("httpx required: pip install httpx")
        client = self._http_client
        if client is None or client.is_closed:
            client = httpx.AsyncClient(timeout=self.timeout)
            self._http_client = client
        return client

    async def _create_redis_pool(self):
        """Connect to Redis and verify the connection with a ping.

        On failure, logs the error and leaves `_redis_pool` as None —
        callers treat a missing pool as "Redis offline" rather than crashing.

        Raises:
            ImportError: If the optional redis dependency is not installed.
        """
        if aioredis is None:
            raise ImportError("redis required: pip install redis")
        try:
            pool = aioredis.from_url(self.redis_url, decode_responses=True)
            await pool.ping()  # fail fast if the server is unreachable
        except Exception as e:
            logger.error(f"Redis connection error: {e}")
            self._redis_pool = None
        else:
            self._redis_pool = pool
            logger.info("Connected to Redis.")

    async def _get_redis_pool(self):
        """Return the Redis pool, lazily connecting on first use.

        Returns:
            The pool, or None when the connection attempt failed
            (see _create_redis_pool).
        """
        pool = self._redis_pool
        if pool is None:
            await self._create_redis_pool()
            pool = self._redis_pool
        return pool

    async def _create_postgres_pool(self):
        """Create the asyncpg connection pool.

        On failure, logs the error and leaves `_postgres_pool` as None so
        health checks report "offline" instead of raising.

        Raises:
            ImportError: If the optional asyncpg dependency is not installed.
        """
        if asyncpg is None:
            raise ImportError("asyncpg required: pip install asyncpg")
        try:
            pool = await asyncpg.create_pool(self.postgres_url)
        except Exception as e:
            logger.error(f"PostgreSQL connection error: {e}")
            self._postgres_pool = None
        else:
            self._postgres_pool = pool
            logger.info("Connected to PostgreSQL.")

    async def _get_postgres_pool(self):
        """Return the PostgreSQL pool, lazily connecting on first use.

        Returns:
            The pool, or None when the connection attempt failed
            (see _create_postgres_pool).
        """
        pool = self._postgres_pool
        if pool is None:
            await self._create_postgres_pool()
            pool = self._postgres_pool
        return pool

    def _create_qdrant_client(self):
        """Instantiate the Qdrant client.

        On failure, logs the error and leaves `_qdrant_client` as None so
        health checks report "offline" instead of raising.

        Raises:
            ImportError: If the optional qdrant_client dependency is not
                installed.
        """
        if QdrantClient is None:
            raise ImportError("qdrant_client required: pip install qdrant_client")
        try:
            client = QdrantClient(url=self.qdrant_url)
        except Exception as e:
            logger.error(f"Qdrant connection error: {e}")
            self._qdrant_client = None
        else:
            self._qdrant_client = client
            logger.info("Connected to Qdrant.")

    def _get_qdrant_client(self):
        """Return the Qdrant client, lazily creating it on first use.

        Returns:
            The client, or None when creation failed
            (see _create_qdrant_client).
        """
        client = self._qdrant_client
        if client is None:
            self._create_qdrant_client()
            client = self._qdrant_client
        return client

    async def health_check(self) -> Dict[str, Any]:
        """Check health of every integrated service and update `is_connected`.

        Returns:
            Dict with a per-service report ("ollama", "redis", "postgres",
            "qdrant"), an ISO-format "timestamp", and an "overall_status" of
            "online" (all services online) or "degraded" (any other status,
            including "model_missing" from Ollama).
        """
        health: Dict[str, Any] = {
            # NOTE(review): naive local time, kept for backward compatibility;
            # consider datetime.now(timezone.utc) for new consumers.
            "timestamp": datetime.now().isoformat(),
            "ollama": await self._check_ollama_health(),
            "redis": await self._check_redis_health(),
            "postgres": await self._check_postgres_health(),
            "qdrant": self._check_qdrant_health(),
        }
        all_online = all(
            health[service]["status"] == "online"
            for service in ("ollama", "redis", "postgres", "qdrant")
        )
        health["overall_status"] = "online" if all_online else "degraded"
        self.is_connected = all_online
        return health

    async def _check_ollama_health(self) -> Dict[str, Any]:
        """Query Ollama's /api/tags endpoint and report model availability.

        Returns:
            On success: status "online" (or "model_missing" when the target
            model isn't listed), the available model names, the target model,
            a readiness flag, and the base endpoint.
            On any failure: status "offline" with the error string.
        """
        tags_url = self.ollama_url.replace("/api/generate", "/api/tags")
        base_url = self.ollama_url.split("/api")[0]
        try:
            http = await self._get_http_client()
            resp = await http.get(tags_url, timeout=10.0)
            resp.raise_for_status()
            payload = resp.json()

            available = [entry.get("name") for entry in payload.get("models", [])]
            # Ollama may list the model with or without an explicit ":latest" tag.
            ready = (
                self.model in available
                or f"{self.model}:latest" in available
            )
            return {
                "status": "online" if ready else "model_missing",
                "models": available,
                "target_model": self.model,
                "model_ready": ready,
                "endpoint": base_url,
            }
        except Exception as e:
            return {
                "status": "offline",
                "error": str(e),
                "endpoint": base_url,
            }

    async def _check_redis_health(self) -> Dict[str, Any]:
        """Actively verify Redis connectivity with a PING.

        Pings the (possibly cached) pool rather than merely checking that the
        pool object exists, so a connection that died after startup is
        reported offline — consistent with _check_postgres_health, which runs
        a real query.

        Returns:
            {"status": "online"} on success, otherwise {"status": "offline",
            "error": <reason>}.
        """
        try:
            redis = await self._get_redis_pool()
            if redis is None:
                return {"status": "offline", "error": "No redis pool"}
            await redis.ping()  # cached pool may have gone stale
            return {"status": "online"}
        except Exception as e:
            return {"status": "offline", "error": str(e)}

    async def _check_postgres_health(self) -> Dict[str, Any]:
        """Report PostgreSQL health by executing a trivial query.

        Returns:
            {"status": "online"} on success, otherwise {"status": "offline",
            "error": <reason>}.
        """
        try:
            pool = await self._get_postgres_pool()
            if not pool:
                return {"status": "offline", "error": "No postgres pool"}
            async with pool.acquire() as conn:
                await conn.execute("SELECT 1")  # liveness probe
            return {"status": "online"}
        except Exception as e:
            return {"status": "offline", "error": str(e)}

    def _check_qdrant_health(self) -> Dict[str, Any]:
        """Report Qdrant health by requesting telemetry data.

        Returns:
            {"status": "online"} on success, otherwise {"status": "offline",
            "error": <reason>}.
        """
        try:
            client = self._get_qdrant_client()
            if not client:
                return {"status": "offline", "error": "No qdrant client"}
            client.get_telemetry_data()  # liveness probe
            return {"status": "online"}
        except Exception as e:
            return {"status": "offline", "error": str(e)}

    @rate_limit(calls=5, period=60)  # Example rate limit: 5 calls per 60 seconds
    async def reason(
        self,
        prompt: str,
        context: Optional[Dict[str, Any]] = None,
        system_prompt: Optional[str] = None,
        temperature: float = None,
        max_tokens: int = None,
        strip_thinking: bool = False
    ) -> Dict[str, Any]:
        """
        Send reasoning request to AIVA (Ollama).

        Args:
            prompt: The question or task for AIVA
            context: Optional context dict to include
            system_prompt: Override default system prompt
            temperature: Sampling temperature (0.0-1.0)
            max_tokens: Maximum tokens to generate
            strip_thinking: Remove <think> blocks from response

        Returns:
            Dict with 'response', 'tokens', 'duration', 'success'
        """

        temperature = temperature if temperature is not None else self.temperature
        max_tokens = max_tokens if max_tokens is not None else self.max_tokens
        try:
            # Build full prompt with context
            full_prompt = prompt
            if context:
                context_str = json.dumps(context, indent=2)
                full_prompt = f"Context:\n