# integration_hub.py
import os
import asyncio
import logging
from datetime import datetime
from typing import Optional, Dict, Any, List, Tuple
from pathlib import Path
import json

# Optional dependencies (install if needed)
try:
    import httpx
except ImportError:
    httpx = None

try:
    import redis.asyncio as aioredis
except ImportError:
    aioredis = None

try:
    import asyncpg
except ImportError:
    asyncpg = None

try:
    import qdrant_client
    from qdrant_client import QdrantClient, models
except ImportError:
    qdrant_client = None
    models = None

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("IntegrationHub")

# Load environment from .env file (if available)
def load_env_file(path: Path) -> None:
    """Load KEY=VALUE pairs from *path* into os.environ without overriding
    variables that are already set.

    Prefers python-dotenv when installed; otherwise falls back to a minimal
    parser that skips blanks/comments and, like python-dotenv, strips one
    pair of matching surrounding quotes from the value.
    """
    try:
        from dotenv import load_dotenv
        load_dotenv(path)
        return
    except ImportError:
        pass  # dotenv not installed — use the manual fallback below
    with open(path) as f:
        for raw in f:
            line = raw.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = line.split('=', 1)
            value = value.strip()
            # Strip matching surrounding quotes so the fallback agrees with
            # python-dotenv (KEY="a b" -> a b).
            if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                value = value[1:-1]
            os.environ.setdefault(key.strip(), value)


ENV_PATH = Path(__file__).parent / "integration_config.env"
if ENV_PATH.exists():
    load_env_file(ENV_PATH)


class IntegrationHub:
    """
    Central integration hub for connecting various services.
    """

    def __init__(self):
        """Read service endpoints/credentials from the environment and set up
        lazily-created clients, health flags, and rate-limit bookkeeping."""
        env = os.getenv

        # Service endpoints and credentials (environment overrides defaults).
        self.ollama_url = env("OLLAMA_URL", "http://localhost:11434/api/generate")
        self.redis_url = env("REDIS_URL", "redis://localhost:6379")
        self.postgres_url = env("POSTGRES_URL", "postgresql://user:password@localhost:5432/database")
        self.qdrant_url = env("QDRANT_URL", "http://localhost:6333")
        self.qdrant_api_key = env("QDRANT_API_KEY")
        self.n8n_url = env("N8N_URL", "http://localhost:5678/webhook")  # Example n8n webhook
        self.ghl_api_key = env("GHL_API_KEY")
        self.stripe_api_key = env("STRIPE_API_KEY")
        self.telegram_bot_token = env("TELEGRAM_BOT_TOKEN")
        self.aiva_model = env("AIVA_MODEL", "mistralai/Mistral-7B-Instruct-v0.1")
        self.timeout = float(env("INTEGRATION_TIMEOUT", "120"))  # Default: 2 minutes

        # Clients/pools are created on first use by the _get_* helpers.
        self._http_client = None
        self._redis_client = None
        self._postgres_pool = None
        self._qdrant_client = None

        services = ("ollama", "redis", "postgres", "qdrant",
                    "n8n", "ghl", "stripe", "telegram")

        # Last observed status per service; refreshed by health_check().
        self.health = dict.fromkeys(services, "unknown")

        # Per-service request caps over a sliding 60-second window.
        per_minute = {
            "ollama": 60,
            "redis": 100,
            "postgres": 30,
            "qdrant": 50,
            "n8n": 20,
            "ghl": 10,
            "stripe": 5,
            "telegram": 30,
        }
        self.rate_limits = {name: {"limit": per_minute[name], "period": 60}
                            for name in services}
        # Recent request timestamps used to enforce the caps above.
        self.last_request_times = {name: [] for name in services}

        logger.info("IntegrationHub initialized.")

    # ---------------------- Connection Management ----------------------

    async def _get_http_client(self) -> "httpx.AsyncClient":
        """Return the shared async HTTP client, (re)creating it on demand."""
        if httpx is None:
            raise ImportError("httpx required: pip install httpx")
        client = self._http_client
        # Rebuild when never created or when a previous close() shut it down.
        if client is None or client.is_closed:
            client = httpx.AsyncClient(timeout=self.timeout)
            self._http_client = client
        return client

    async def _get_redis_client(self) -> "aioredis.Redis":
        """Return the shared Redis client, creating it on first use."""
        if aioredis is None:
            raise ImportError("redis required: pip install redis")
        if self._redis_client is not None:
            return self._redis_client
        self._redis_client = aioredis.from_url(self.redis_url)
        return self._redis_client

    async def _get_postgres_pool(self) -> "asyncpg.Pool":
        """Return the shared PostgreSQL pool, creating it on first use.

        On a failed pool creation, marks postgres as offline in the health
        map and re-raises so the caller sees the original error.
        """
        if asyncpg is None:
            raise ImportError("asyncpg required: pip install asyncpg")
        if self._postgres_pool is not None:
            return self._postgres_pool
        try:
            pool = await asyncpg.create_pool(self.postgres_url)
        except Exception as e:
            logger.error(f"Postgres connection error: {e}")
            self.health["postgres"] = "offline"
            raise
        self._postgres_pool = pool
        return pool

    def _get_qdrant_client(self) -> "QdrantClient":
        """Return the shared Qdrant client, creating it on first use.

        On a failed construction, marks qdrant as offline in the health map
        and re-raises the original error.
        """
        if qdrant_client is None:
            raise ImportError("qdrant_client required: pip install qdrant-client")
        if self._qdrant_client is not None:
            return self._qdrant_client
        try:
            client = QdrantClient(url=self.qdrant_url, api_key=self.qdrant_api_key)
        except Exception as e:
            logger.error(f"Qdrant connection error: {e}")
            self.health["qdrant"] = "offline"
            raise
        self._qdrant_client = client
        return client

    async def close(self):
        """Close all open connections and reset the cached clients.

        Each client is closed independently so a failure closing one service
        no longer aborts the others (the original code leaked the remaining
        connections on the first error).  Cached references are cleared
        afterwards so the lazy `_get_*` helpers rebuild fresh connections if
        the hub is used again after `close()` — previously redis/postgres/
        qdrant would have returned already-closed clients.
        """
        if self._http_client and not self._http_client.is_closed:
            try:
                await self._http_client.aclose()
            except Exception as e:
                logger.warning(f"Error closing HTTP client: {e}")
        self._http_client = None

        if self._redis_client:
            try:
                await self._redis_client.close()
            except Exception as e:
                logger.warning(f"Error closing Redis client: {e}")
        self._redis_client = None

        if self._postgres_pool:
            try:
                await self._postgres_pool.close()
            except Exception as e:
                logger.warning(f"Error closing Postgres pool: {e}")
        self._postgres_pool = None

        if self._qdrant_client:
            try:
                # QdrantClient.close() is synchronous — no await.
                self._qdrant_client.close()
            except Exception as e:
                logger.warning(f"Error closing Qdrant client: {e}")
        self._qdrant_client = None

        logger.info("IntegrationHub connections closed.")

    # ---------------------- Rate Limiting ----------------------

    def _is_rate_limited(self, service: str) -> bool:
        """Check if a service is rate limited."""
        now = datetime.now().timestamp()
        requests = [t for t in self.last_request_times[service] if now - t < self.rate_limits[service]["period"]]
        return len(requests) >= self.rate_limits[service]["limit"]

    def _update_rate_limit(self, service: str):
        """Update the rate limit timestamp."""
        self.last_request_times[service].append(datetime.now().timestamp())
        # Keep only relevant timestamps
        now = datetime.now().timestamp()
        self.last_request_times[service] = [t for t in self.last_request_times[service] if now - t < self.rate_limits[service]["period"]]

    # ---------------------- Health Checks ----------------------

    async def health_check(self) -> Dict[str, str]:
        """Probe every integrated service and refresh ``self.health``.

        The async probes now run concurrently via ``asyncio.gather`` (they
        hit independent services, and each ``_check_*`` method catches its
        own exceptions), so total latency is bounded by the slowest single
        check instead of the sum of all of them.

        Returns:
            The updated health dict mapping service name to a status string
            such as "online", "offline", "rate_limited", or "unconfigured".
        """
        async_checks = {
            "ollama": self._check_ollama_health(),
            "redis": self._check_redis_health(),
            "postgres": self._check_postgres_health(),
            "n8n": self._check_n8n_health(),
            "ghl": self._check_ghl_health(),
            "stripe": self._check_stripe_health(),
            "telegram": self._check_telegram_health(),
        }
        results = await asyncio.gather(*async_checks.values())
        self.health.update(zip(async_checks.keys(), results))
        # The Qdrant client is synchronous, so this check stays inline.
        self.health["qdrant"] = self._check_qdrant_health()
        return self.health

    async def _check_ollama_health(self) -> str:
        """Check that Ollama responds and that the configured model is pulled.

        Returns "online", "model_missing", "rate_limited", or "offline".
        """
        try:
            if self._is_rate_limited("ollama"):
                return "rate_limited"
            http = await self._get_http_client()
            # The generate endpoint has no GET probe; /api/tags lists models.
            resp = await http.get(
                self.ollama_url.replace("/api/generate", "/api/tags"),
                timeout=10.0,
            )
            resp.raise_for_status()
            available = {m.get("name") for m in resp.json().get("models", [])}
            self._update_rate_limit("ollama")
            # Ollama may report the model with or without the ":latest" tag.
            if self.aiva_model in available or f"{self.aiva_model}:latest" in available:
                return "online"
            return "model_missing"
        except Exception as e:
            logger.warning(f"Ollama health check failed: {e}")
            return "offline"

    async def _check_redis_health(self) -> str:
        """Ping Redis; returns "online", "rate_limited", or "offline"."""
        try:
            if self._is_rate_limited("redis"):
                return "rate_limited"
            client = await self._get_redis_client()
            await client.ping()
            self._update_rate_limit("redis")
        except Exception as e:
            logger.warning(f"Redis health check failed: {e}")
            return "offline"
        return "online"

    async def _check_postgres_health(self) -> str:
        """Run a trivial query against PostgreSQL to verify connectivity.

        Returns "online", "rate_limited", or "offline".
        """
        try:
            if self._is_rate_limited("postgres"):
                return "rate_limited"
            pool = await self._get_postgres_pool()
            # SELECT 1 is the cheapest round trip that proves the pool works.
            async with pool.acquire() as conn:
                await conn.execute("SELECT 1")
            self._update_rate_limit("postgres")
        except Exception as e:
            logger.warning(f"Postgres health check failed: {e}")
            return "offline"
        return "online"

    def _check_qdrant_health(self) -> str:
        """Make a lightweight Qdrant call to verify connectivity.

        Returns "online", "rate_limited", or "offline".
        """
        try:
            if self._is_rate_limited("qdrant"):
                return "rate_limited"
            # Telemetry is a cheap round trip; any successful call proves
            # the server is reachable.
            self._get_qdrant_client().get_telemetry_data()
            self._update_rate_limit("qdrant")
        except Exception as e:
            logger.warning(f"Qdrant health check failed: {e}")
            return "offline"
        return "online"

    async def _check_n8n_health(self) -> str:
        """POST a test payload to the n8n webhook to confirm it accepts requests.

        Returns "online", "rate_limited", or "offline".
        """
        try:
            if self._is_rate_limited("n8n"):
                return "rate_limited"
            http = await self._get_http_client()
            resp = await http.post(self.n8n_url, json={"test": "health_check"}, timeout=10.0)
            # A non-2xx response counts as offline via the except path.
            resp.raise_for_status()
            self._update_rate_limit("n8n")
        except Exception as e:
            logger.warning(f"n8n health check failed: {e}")
            return "offline"
        return "online"

    async def _check_ghl_health(self) -> str:
        """Placeholder for GoHighLevel API health check."""
        # Implement actual GHL API call for health check
        if not self.ghl_api_key:
            return "unconfigured"
        try:
            if self._is_rate_limited("ghl"):
                return "rate_limited"
            # Replace with actual GHL API call
            client = await self._get_http_client()
            url = "https://services.leadconnectorhq.com/locations" # Example endpoint
            headers = {"Authorization": f"Bearer {self.ghl_api_key}"}
            response = await client.get(url, headers=headers, timeout=10.0)
            response.raise_for_status()
            self._update_rate_limit("ghl")
            return "online"
        except Exception as e:
            logger.warning(f"GHL health check failed: {e}")
            return "offline"

    async def _check_stripe_health(self) -> str:
        """Placeholder for Stripe API health check."""
        # Implement actual Stripe API call for health check
        if not self.stripe_api_key:
            return "unconfigured"
        try:
            if self._is_rate_limited("stripe"):
                return "rate_limited"
            # Replace with actual Stripe API call (e.g., list customers)
            client = await self._get_http_client()
            url = "https://api.stripe.com/v1/customers"
            headers = {"Authorization": f"Bearer {self.stripe_api_key}"}
            response = await client.get(url, headers=headers, timeout=10.0)
            response.raise_for_status()
            self._update_rate_limit("stripe")
            return "online"
        except Exception as e:
            logger.warning(f"Stripe health check failed: {e}")
            return "offline"

    async def _check_telegram_health(self) -> str:
        """Placeholder for Telegram Bot API health check."""
        if not self.telegram_bot_token:
            return "unconfigured"
        try:
            if self._is_rate_limited("telegram"):
                return "rate_limited"

            bot_url = f"https://api.telegram.org/bot{self.telegram_bot_token}/getMe"
            client = await self._get_http_client()
            response = await client.get(bot_url, timeout=10.0)
            response.raise_for_status()
            data = response.json()
            if data.get("ok"):
                self._update_rate_limit("telegram")
                return "online"
            else:
                return "offline"
        except Exception as e:
            logger.warning(f"Telegram health check failed: {e}")
            return "offline"

    # ---------------------- Unified Interface Methods ----------------------

    async def reason(
        self,
        prompt: str,
        context: Optional[Dict[str, Any]] = None,
        system_prompt: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
        strip_thinking: bool = False
    ) -> Dict[str, Any]:
        """
        Send reasoning request to AIVA (Ollama).

        Args:
            prompt: The question or task for AIVA
            context: Optional context dict to include
            system_prompt: Override default system prompt
            temperature: Sampling temperature (0.0-1.0)
            max_tokens: Maximum tokens to generate
            strip_thinking: Remove <think> blocks from response

        Returns:
            Dict with 'response', 'tokens', 'duration', 'success'
        """
        if self.health["ollama"] != "online":
            return {"success": False, "error": "Ollama is offline"}

        if self._is_rate_limited("ollama"):
            return {"success": False, "error": "Ollama rate limit exceeded"}

        try:
            # Build full prompt with context
            full_prompt = prompt
            if context:
                context_str = json.dumps(context, indent=2)
                full_prompt = f"Context:\n