# integration_hub.py
import asyncio
import json
import logging
import os
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

import httpx
import psycopg
import redis.asyncio as aioredis
from qdrant_client import QdrantClient, models
from redis.asyncio.connection import ConnectionPool

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("IntegrationHub")

# Configuration is read directly from environment variables via os.getenv below.
# If settings live in a .env file, load it first with a library such as
# `python-dotenv` (see aiva_bridge.py for an example).

class IntegrationHub:
    """
    Central hub for integrating various services:
        - Ollama/QwenLong
        - Redis
        - PostgreSQL
        - Qdrant
        - n8n
        - External APIs (GHL, Stripe, Telegram)
    """

    def __init__(self):
        """Read service endpoints and credentials from the environment and
        prepare lazily-initialized client/pool holders.

        No network connections are opened here; each client is created on
        first use by its corresponding ``_get_*`` accessor.
        """
        # Core Service Configuration.
        # SECURITY(review): the fallback defaults below embed what look like a
        # real host/IP and a Redis password directly in source — confirm these
        # are not live credentials; if they are, rotate them and drop the
        # hard-coded defaults.
        self.ollama_url = os.getenv("OLLAMA_URL", "http://152.53.201.152:23405/api/generate")
        self.ollama_model = os.getenv("AIVA_MODEL", "qwen-long")
        self.redis_url = os.getenv("REDIS_URL", "redis://:e2ZyYYr4oWRdASI2CaLc-@redis-genesis-u50607.vm.elestio.app:26379")
        self.postgres_url = os.getenv("POSTGRES_URL", "postgresql://user:password@host:port/database") # Example placeholder DSN
        self.qdrant_url = os.getenv("QDRANT_URL", "http://localhost:6333")
        self.n8n_url = os.getenv("N8N_URL", "http://localhost:5678")

        # External API Keys (Replace with your actual keys)
        self.ghl_api_key = os.getenv("GHL_API_KEY", "your_ghl_api_key")
        self.stripe_api_key = os.getenv("STRIPE_API_KEY", "your_stripe_api_key")
        self.telegram_bot_token = os.getenv("TELEGRAM_BOT_TOKEN", "your_telegram_bot_token")

        # Connection Pools and Clients — all start as None and are created
        # lazily by the _get_* helpers.
        self._redis_pool: Optional[ConnectionPool] = None
        # NOTE(review): psycopg 3 ships connection pools in the separate
        # `psycopg_pool` package; `psycopg.AsyncConnectionPool` may not
        # resolve at runtime — confirm the intended import.
        self._postgres_pool: Optional[psycopg.AsyncConnectionPool] = None
        self._qdrant_client: Optional[QdrantClient] = None
        self._http_client: Optional[httpx.AsyncClient] = None

        # Rate Limiting (Example: 5 requests per second for Ollama).
        # The semaphore bounds *concurrent* Ollama requests, not requests per
        # second — it is a concurrency cap rather than a true rate limiter.
        self.ollama_rate_limit = 5
        self._ollama_semaphore = asyncio.Semaphore(self.ollama_rate_limit)

        # Health Status: per-service state, updated by health checks elsewhere.
        self.health = {
            "ollama": "unknown",
            "redis": "unknown",
            "postgres": "unknown",
            "qdrant": "unknown",
            "n8n": "unknown",
            "ghl": "unknown",
            "stripe": "unknown",
            "telegram": "unknown"
        }

        logger.info("IntegrationHub initialized.")

    async def _get_http_client(self) -> httpx.AsyncClient:
        """Return the shared HTTP client, creating it lazily on first use."""
        client = self._http_client
        if client is None:
            # One client instance is reused so httpx can pool connections.
            client = httpx.AsyncClient(timeout=30.0)
            self._http_client = client
        return client

    async def _get_redis_pool(self) -> ConnectionPool:
        """Return the shared Redis connection pool, building it lazily from the configured URL."""
        pool = self._redis_pool
        if pool is None:
            pool = aioredis.ConnectionPool.from_url(self.redis_url)
            self._redis_pool = pool
        return pool

    async def _get_postgres_pool(self) -> Optional[psycopg.AsyncConnectionPool]:
        """Get or create the PostgreSQL connection pool.

        Lazily builds an async pool (min 2 / max 10 connections) and
        smoke-tests it with ``SELECT 1`` before publishing it.  On any
        failure the partially-built pool is closed, the attribute stays
        ``None``, and ``None`` is returned — callers must handle an
        unavailable database.

        NOTE(review): psycopg 3 ships connection pools in the separate
        ``psycopg_pool`` package; ``psycopg.AsyncConnectionPool`` may not
        resolve — confirm the intended import.
        """
        if self._postgres_pool is None:
            pool = None
            try:
                pool = psycopg.AsyncConnectionPool(self.postgres_url, min_size=2, max_size=10)
                async with pool.connection() as conn:
                    async with conn.cursor() as cur:
                        await cur.execute("SELECT 1")  # Simple test query
                self._postgres_pool = pool
                logger.info("PostgreSQL connection pool created successfully.")
            except Exception as e:
                logger.error(f"Error creating PostgreSQL connection pool: {e}")
                # Close the half-built pool so its connections are not leaked
                # (the original code dropped it without closing).
                if pool is not None:
                    try:
                        await pool.close()
                    except Exception:
                        logger.warning("Failed to close partially-created PostgreSQL pool.")
                self._postgres_pool = None
        return self._postgres_pool

    def _get_qdrant_client(self) -> QdrantClient:
        """Return the shared Qdrant client, instantiating it on first access."""
        client = self._qdrant_client
        if client is None:
            client = QdrantClient(url=self.qdrant_url)
            self._qdrant_client = client
        return client

    # ------------------- Service-Specific Methods -------------------

    async def ollama_reason(self, prompt: str, context: Optional[Dict[str, Any]] = None,
                        system_prompt: Optional[str] = None, temperature: float = 0.7,
                        max_tokens: int = 4096, strip_thinking: bool = False) -> Dict[str, Any]:
        """
        Send reasoning request to Ollama.  Applies rate limiting.

        Args:
            prompt: The question or task for AIVA
            context: Optional context dict to include
            system_prompt: Override default system prompt
            temperature: Sampling temperature (0.0-1.0)
            max_tokens: Maximum tokens to generate
            strip_thinking: Remove <think> blocks from response

        Returns:
            Dict with 'response', 'tokens', 'duration', 'success'
        """
        async with self._ollama_semaphore:
            try:
                full_prompt = prompt
                if context:
                    context_str = json.dumps(context, indent=2)
                    full_prompt = f"Context:\n