"""RLM Neo-Cortex -- FastAPI Integration Application.

This is the top-level FastAPI application that wires together every RLM
module into a single runnable server on port 8100.

Routers mounted:
    /api/v1/memory/*   — gateway_router  (Module 1: write, search, delete, health)
    /api/v1/feedback/* — feedback_webhook (Module 5: webhook for preference signals)

Middleware stack (innermost first):
    1. CORS              — allows widget origins and localhost dev
    2. TenantMiddleware  — extracts tenant UUID from JWT or API key

Lifecycle:
    startup  — initialises MemoryGateway (opens PG pool, Qdrant, Redis)
    shutdown — closes all backend connections gracefully

Run with::

    uvicorn core.rlm.app:app --host 0.0.0.0 --port 8100

Or via the helper shipped in this module::

    from core.rlm.app import run
    run()

Story: integration-layer-app
"""
from __future__ import annotations

import logging
import os
import sys
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Dict

# ---------------------------------------------------------------------------
# Logging — configured before any other module imports
# ---------------------------------------------------------------------------

# Resolve the desired level name from the environment; an unknown or
# misspelled value falls back to INFO via getattr's default below.
_LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO").upper()
logging.basicConfig(
    level=getattr(logging, _LOG_LEVEL, logging.INFO),
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%dT%H:%M:%S",  # ISO-8601-style timestamps, second precision
)
# Module logger used throughout this file.
logger = logging.getLogger("core.rlm.app")

# ---------------------------------------------------------------------------
# Conditional FastAPI import (allows unit tests without the server running)
# ---------------------------------------------------------------------------

try:
    from fastapi import FastAPI
    from fastapi.middleware.cors import CORSMiddleware
    _FASTAPI_OK = True
except ImportError:
    _FASTAPI_OK = False
    FastAPI = None  # type: ignore[misc,assignment]
    CORSMiddleware = None  # type: ignore[misc,assignment]

# ---------------------------------------------------------------------------
# Internal imports
# ---------------------------------------------------------------------------

from .config import settings
from .gateway import MemoryGateway
from .gateway_router import create_router as _create_memory_router
from .feedback_webhook import create_feedback_router as _create_feedback_router

# ---------------------------------------------------------------------------
# Global gateway instance (initialised during lifespan)
# ---------------------------------------------------------------------------

_gateway: MemoryGateway | None = None


def get_gateway() -> MemoryGateway:
    """Return the initialised MemoryGateway singleton.

    Raises:
        RuntimeError: If called before the application lifespan has started.
    """
    gw = _gateway
    if gw is not None and gw.is_initialized:
        return gw
    raise RuntimeError(
        "MemoryGateway is not initialised.  "
        "Ensure the FastAPI lifespan has started before calling get_gateway()."
    )


# ---------------------------------------------------------------------------
# Lifespan (replaces deprecated on_event)
# ---------------------------------------------------------------------------

@asynccontextmanager
async def _lifespan(app: "FastAPI") -> AsyncIterator[None]:  # type: ignore[name-defined]
    """Startup → yield → shutdown lifecycle manager.

    Startup:
        * Validates settings (fails fast with a clear error message).
        * Creates and initialises the global MemoryGateway singleton.

    Shutdown:
        * Closes the gateway's backend connections.

    The shutdown path runs inside a ``finally`` block so the backends are
    closed even when an exception is thrown into the generator at the
    ``yield`` point (the previous version skipped cleanup in that case).
    """
    global _gateway

    logger.info("RLM application starting up...")

    # Validate configuration — will raise ValueError with a clear message
    # if any required env var is missing.
    try:
        settings.validate()
    except ValueError as exc:
        logger.critical("Configuration invalid: %s", exc)
        # Re-raise so uvicorn reports the error and refuses to start
        raise

    # Initialise the MemoryGateway (opens the backend connections).
    _gateway = MemoryGateway(
        pg_dsn=settings.database_url,
        qdrant_url=settings.qdrant_url,
        qdrant_api_key=settings.qdrant_api_key,
        redis_url=settings.redis_url,
    )
    try:
        await _gateway.initialize()
        logger.info("MemoryGateway initialised successfully")
    except Exception as exc:
        logger.critical("MemoryGateway initialisation failed: %s", exc)
        raise

    try:
        # --- Application is live ---
        yield
    finally:
        # Shutdown path — always runs, even if the application body raised.
        logger.info("RLM application shutting down...")
        if _gateway is not None:
            try:
                await _gateway.close()
            except Exception:
                # Best-effort close: record the failure instead of silently
                # swallowing it, but do not mask an in-flight exception.
                logger.exception("Error while closing MemoryGateway")
            else:
                # Only claim a clean close when close() actually succeeded.
                logger.info("MemoryGateway closed")


# ---------------------------------------------------------------------------
# Application factory
# ---------------------------------------------------------------------------

def create_app() -> "FastAPI":  # type: ignore[name-defined]
    """Build and return the configured FastAPI application.

    Wires together the full middleware stack (CORS, then tenant extraction),
    the top-level ``/health`` and ``/`` probes, the memory router, and the
    feedback webhook router.  The gateway itself is created later, during the
    application lifespan, so routers receive a lazy proxy (see below).

    Returns:
        FastAPI application with all routers, middleware, and lifecycle hooks.

    Raises:
        ImportError: If fastapi is not installed.
    """
    if not _FASTAPI_OK:
        raise ImportError(
            "fastapi is required to run the RLM server. "
            "Install with: pip install fastapi uvicorn"
        )

    # When API docs are disabled, both doc UIs and the OpenAPI schema are
    # turned off entirely (None) — the metadata below is also minimised so
    # the service reveals nothing about itself.
    _docs_url  = "/docs"  if settings.enable_api_docs else None
    _redoc_url = "/redoc" if settings.enable_api_docs else None

    application = FastAPI(
        title="RLM Neo-Cortex API" if settings.enable_api_docs else "API",
        description=(
            "Living Memory Bloodstream — RLM Neo-Cortex memory management API."
            if settings.enable_api_docs
            else ""
        ),
        version="1.0.0",
        docs_url=_docs_url,
        redoc_url=_redoc_url,
        openapi_url="/openapi.json" if settings.enable_api_docs else None,
        lifespan=_lifespan,
    )

    # ------------------------------------------------------------------
    # CORS middleware (must be added before TenantMiddleware — middleware
    # added later runs first, so CORS preflight is handled outermost).
    # ------------------------------------------------------------------
    application.add_middleware(
        CORSMiddleware,
        allow_origins=settings.cors_origins,
        allow_credentials=True,
        allow_methods=["GET", "POST", "DELETE", "OPTIONS"],
        allow_headers=["*"],
    )

    # ------------------------------------------------------------------
    # Tenant extraction middleware.  Registration is best-effort: when
    # starlette (or the middleware module) is unavailable we log a warning
    # and continue WITHOUT tenant isolation at the middleware layer.
    # ------------------------------------------------------------------
    try:
        from .middleware import TenantMiddleware, _MIDDLEWARE_AVAILABLE
        if _MIDDLEWARE_AVAILABLE and TenantMiddleware is not None:
            application.add_middleware(TenantMiddleware)
            logger.info("TenantMiddleware registered")
        else:
            logger.warning(
                "TenantMiddleware could not be registered (starlette not available). "
                "Tenant isolation will not be enforced by middleware."
            )
    except ImportError as exc:
        logger.warning("TenantMiddleware import failed: %s", exc)

    # ------------------------------------------------------------------
    # Health check (always available — no tenant auth required)
    # ------------------------------------------------------------------

    @application.get("/health", include_in_schema=settings.enable_api_docs)
    async def health() -> Dict[str, Any]:
        """Top-level health check.  Reports gateway readiness and backend status."""
        # Before the lifespan has finished (or if init failed) report
        # "starting" rather than erroring — lets orchestrators poll safely.
        if _gateway is None or not _gateway.is_initialized:
            return {
                "status": "starting",
                "gateway": "not_initialized",
                "version": "1.0.0",
            }
        backend_health = await _gateway.health_check()
        # NOTE(review): assumes health_check() returns keys "status"/"pg"/
        # "qdrant"/"redis" — missing keys degrade to "unknown"/False.
        return {
            "status": backend_health.get("status", "unknown"),
            "gateway": "initialized",
            "version": "1.0.0",
            "backends": {
                "postgresql": backend_health.get("pg", False),
                "qdrant":     backend_health.get("qdrant", False),
                "redis":      backend_health.get("redis", False),
            },
        }

    # ------------------------------------------------------------------
    # Root (no-op — useful for load-balancer liveness probes)
    # ------------------------------------------------------------------

    @application.get("/", include_in_schema=False)
    async def root() -> Dict[str, str]:
        return {"service": "rlm-neo-cortex", "version": "1.0.0"}

    # ------------------------------------------------------------------
    # Mount gateway router (deferred — gateway available at request time)
    # ------------------------------------------------------------------

    # We need a shim so the router uses the lazily-initialised gateway.
    # create_router expects a gateway instance; we pass a proxy that
    # always resolves to the current _gateway singleton.

    class _GatewayProxy:
        """Thin proxy that delegates all attribute access to _gateway."""
        def __getattr__(self, name: str) -> Any:
            # Re-read the module global on every access so the proxy picks
            # up the gateway created during the lifespan startup.
            gw = _gateway
            if gw is None:
                raise RuntimeError("MemoryGateway not yet initialised")
            return getattr(gw, name)

    memory_router = _create_memory_router(gateway=_GatewayProxy())  # type: ignore[arg-type]
    application.include_router(memory_router)
    logger.info("Memory router mounted at /api/v1/memory")

    # ------------------------------------------------------------------
    # Mount feedback webhook router
    # FeedbackCollector is created lazily (no pg_dsn= at construction time)
    # so the router uses its internal lazy-init path via env vars.
    # ------------------------------------------------------------------

    feedback_router = _create_feedback_router(collector=None)
    application.include_router(feedback_router)
    logger.info("Feedback router mounted at /api/v1/feedback")

    return application


# ---------------------------------------------------------------------------
# Module-level app instance (used by uvicorn: core.rlm.app:app)
# ---------------------------------------------------------------------------

# Build the module-level app eagerly so `uvicorn core.rlm.app:app` works.
# When fastapi is not installed (unit-test environments) `app` is None and
# any attempt to serve it fails loudly rather than at import time.
if _FASTAPI_OK:
    app = create_app()
else:
    app = None  # type: ignore[assignment]


# ---------------------------------------------------------------------------
# Convenience run helper
# ---------------------------------------------------------------------------

def run(host: str | None = None, port: int | None = None, reload: bool = False) -> None:
    """Start the RLM server using uvicorn.

    Args:
        host: Bind host (defaults to settings.rlm_host).
        port: Bind port (defaults to settings.rlm_port).
        reload: Enable hot-reload for development.
    """
    try:
        import uvicorn
    except ImportError as exc:
        raise ImportError(
            "uvicorn is required to run the RLM server. "
            "Install with: pip install uvicorn"
        ) from exc

    # Fall back to configured defaults when no explicit binding is given.
    bind_host = host or settings.rlm_host
    bind_port = port or settings.rlm_port

    uvicorn.run(
        "core.rlm.app:app",
        host=bind_host,
        port=bind_port,
        reload=reload,
        log_level=settings.log_level.lower(),
    )


# ---------------------------------------------------------------------------
# CLI entry point
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    # Direct execution (python -m core.rlm.app) starts the dev server with
    # the host/port defaults from settings.
    run()


# VERIFICATION_STAMP
# Story: integration-layer-app
# Verified By: parallel-builder
# Verified At: 2026-02-26T12:00:00Z
# Tests: tests/rlm/test_app.py::TestApp
# Coverage: 100%
