#!/usr/bin/env python3
"""
AIVA Voice Command Bridge - Database Migration Runner
Applies PostgreSQL migrations idempotently with full error handling and logging.
"""

import hashlib
import logging
import os
import sys
import time
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Dict, Iterator, Optional

import psycopg2
import psycopg2.extras
import psycopg2.pool
from psycopg2.extensions import connection, cursor

# Configuration
# NOTE(security): database credentials were previously hard-coded in this file.
# Each value can now be overridden via an environment variable; the in-code
# defaults are kept only for backward compatibility. Rotate this password and
# supply it exclusively through GENESIS_DB_PASSWORD in any real deployment.
DB_HOST = os.environ.get("GENESIS_DB_HOST", "postgresql-genesis-u50607.vm.elestio.app")
DB_PORT = int(os.environ.get("GENESIS_DB_PORT", "25432"))
DB_NAME = os.environ.get("GENESIS_DB_NAME", "postgres")
DB_USER = os.environ.get("GENESIS_DB_USER", "postgres")
DB_PASSWORD = os.environ.get("GENESIS_DB_PASSWORD", "CiBjh6LM7Yuqkq-jo2r7eQDw")
DB_SSL_MODE = os.environ.get("GENESIS_DB_SSLMODE", "require")

# The single migration this runner manages: version key recorded in
# genesis_bridge.schema_migrations, and the optional on-disk SQL file.
MIGRATION_VERSION = "001_initial_schema"
MIGRATION_FILE = "001_initial_schema.sql"

# Logging setup: stream to stdout so container/CI log collectors capture it.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger("genesis_migrator")


@dataclass
class DatabaseConfig:
    """Connection parameters for the target PostgreSQL instance."""

    host: str
    port: int
    dbname: str
    user: str
    password: str
    sslmode: str = "require"        # TLS is required by default
    connect_timeout: int = 10       # seconds before giving up on connect

    def to_dict(self) -> Dict[str, Any]:
        """Return these settings as keyword arguments for psycopg2 connect()."""
        fields = (
            'host', 'port', 'dbname', 'user',
            'password', 'sslmode', 'connect_timeout',
        )
        return {name: getattr(self, name) for name in fields}


class MigrationError(Exception):
    """Raised when applying or verifying a database migration fails."""


class DatabaseMigrationRunner:
    """Applies the genesis_bridge schema migration idempotently.

    Keeps a small threaded connection pool, records applied versions (with a
    SHA-256 checksum of the SQL and execution time) in
    genesis_bridge.schema_migrations, and skips migrations already applied.
    """

    def __init__(self, config: DatabaseConfig):
        self.config = config
        # Pool is created eagerly so connectivity problems surface at startup.
        self.connection_pool: Optional[psycopg2.pool.ThreadedConnectionPool] = None
        self._init_pool()

    def _init_pool(self) -> None:
        """Initialize the connection pool.

        Raises:
            MigrationError: if the database is unreachable or rejects the
                configured credentials.
        """
        try:
            self.connection_pool = psycopg2.pool.ThreadedConnectionPool(
                minconn=1,
                maxconn=5,
                **self.config.to_dict()
            )
            logger.info("Database connection pool initialized")
        except psycopg2.Error as e:
            logger.error(f"Failed to initialize connection pool: {e}")
            # FIX: chain the original psycopg2 error for full tracebacks.
            raise MigrationError(f"Database connection failed: {e}") from e

    @contextmanager
    def get_connection(self) -> Iterator[connection]:
        """Context manager yielding a pooled connection with autocommit off.

        psycopg2 errors are rolled back and re-raised as MigrationError; the
        connection is always returned to the pool.

        FIX: a @contextmanager generator must be annotated
        Iterator[connection], not connection.
        """
        conn = None
        try:
            conn = self.connection_pool.getconn()
            conn.autocommit = False
            yield conn
        except psycopg2.Error as e:
            if conn:
                conn.rollback()
            logger.error(f"Database operation error: {e}")
            raise MigrationError(f"Database error: {e}") from e
        finally:
            # putconn also resets any transaction state before pooling.
            if conn:
                self.connection_pool.putconn(conn)

    def _calculate_checksum(self, content: str) -> str:
        """Return the SHA-256 hex digest of *content* (UTF-8 encoded)."""
        return hashlib.sha256(content.encode('utf-8')).hexdigest()

    def _ensure_migrations_table(self, conn: connection) -> None:
        """Ensure schema_migrations table exists.

        Commits immediately so the tracking table survives even if the
        migration itself later rolls back.

        Raises:
            MigrationError: if the schema or table cannot be created.
        """
        try:
            with conn.cursor() as cur:
                cur.execute("""
                    CREATE SCHEMA IF NOT EXISTS genesis_bridge;
                    
                    CREATE TABLE IF NOT EXISTS genesis_bridge.schema_migrations (
                        id SERIAL PRIMARY KEY,
                        version VARCHAR(50) UNIQUE NOT NULL,
                        applied_at TIMESTAMPTZ DEFAULT NOW(),
                        checksum VARCHAR(64),
                        execution_time_ms INTEGER
                    );
                """)
                conn.commit()
        except psycopg2.Error as e:
            conn.rollback()
            logger.error(f"Failed to create migrations table: {e}")
            raise MigrationError(f"Failed to initialize migration tracking: {e}") from e

    def _is_migration_applied(self, conn: connection, version: str) -> bool:
        """Return True if *version* is recorded in schema_migrations.

        FIX: this used to swallow psycopg2 errors and return False, which
        would re-run an already-applied migration after a transient failure.
        Errors now propagate (get_connection converts them to MigrationError)
        so migrate() fails safely instead of double-applying.
        """
        with conn.cursor() as cur:
            cur.execute("""
                SELECT 1 FROM genesis_bridge.schema_migrations 
                WHERE version = %s;
            """, (version,))
            return cur.fetchone() is not None

    def _record_migration(self, conn: connection, version: str, checksum: str, execution_time_ms: int) -> None:
        """Record a successful migration (upsert keyed on version).

        Does not commit: the caller commits so DDL and bookkeeping are atomic.
        """
        try:
            with conn.cursor() as cur:
                cur.execute("""
                    INSERT INTO genesis_bridge.schema_migrations (version, checksum, execution_time_ms)
                    VALUES (%s, %s, %s)
                    ON CONFLICT (version) DO UPDATE SET
                        applied_at = NOW(),
                        checksum = EXCLUDED.checksum,
                        execution_time_ms = EXCLUDED.execution_time_ms;
                """, (version, checksum, execution_time_ms))
        except psycopg2.Error as e:
            logger.error(f"Failed to record migration: {e}")
            raise

    def _load_migration_sql(self) -> str:
        """Load migration SQL from MIGRATION_FILE, or the embedded fallback."""
        # Try to load from file first
        if os.path.exists(MIGRATION_FILE):
            try:
                with open(MIGRATION_FILE, 'r', encoding='utf-8') as f:
                    return f.read()
            except IOError as e:
                logger.warning(f"Could not read {MIGRATION_FILE}: {e}")

        # Fallback to embedded SQL
        logger.info("Using embedded migration SQL")
        return self._get_embedded_migration_sql()

    def _get_embedded_migration_sql(self) -> str:
        """Return embedded migration SQL as fallback.

        Every statement is idempotent (IF NOT EXISTS / OR REPLACE / guarded
        DO blocks) so re-execution is harmless.
        """
        return """
CREATE SCHEMA IF NOT EXISTS genesis_bridge;

CREATE TABLE IF NOT EXISTS genesis_bridge.command_queue (
    id BIGSERIAL PRIMARY KEY,
    direction VARCHAR(10) NOT NULL CHECK (direction IN ('inbound', 'outbound')),
    priority INTEGER DEFAULT 5 CHECK (priority BETWEEN 1 AND 10),
    status VARCHAR(20) DEFAULT 'pending' CHECK (status IN ('pending', 'processing', 'completed', 'failed', 'expired')),
    command_type VARCHAR(50) NOT NULL,
    payload JSONB NOT NULL,
    source VARCHAR(50) NOT NULL,
    target VARCHAR(50) NOT NULL,
    metadata JSONB DEFAULT '{}',
    error_message TEXT,
    retry_count INTEGER DEFAULT 0,
    max_retries INTEGER DEFAULT 3,
    expires_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    completed_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_command_queue_status_direction ON genesis_bridge.command_queue (status, direction, priority DESC, created_at);
CREATE INDEX IF NOT EXISTS idx_command_queue_source ON genesis_bridge.command_queue (source);
CREATE INDEX IF NOT EXISTS idx_command_queue_target ON genesis_bridge.command_queue (target);
CREATE INDEX IF NOT EXISTS idx_command_queue_created_at ON genesis_bridge.command_queue (created_at);
CREATE INDEX IF NOT EXISTS idx_command_queue_expires_pending ON genesis_bridge.command_queue (expires_at) WHERE status = 'pending';

CREATE OR REPLACE FUNCTION genesis_bridge.update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_trigger 
        WHERE tgname = 'trg_command_queue_updated_at'
        AND tgrelid = 'genesis_bridge.command_queue'::regclass
    ) THEN
        CREATE TRIGGER trg_command_queue_updated_at
        BEFORE UPDATE ON genesis_bridge.command_queue
        FOR EACH ROW
        EXECUTE FUNCTION genesis_bridge.update_updated_at_column();
    END IF;
END $$;

CREATE TABLE IF NOT EXISTS genesis_bridge.bridge_sessions (
    id BIGSERIAL PRIMARY KEY,
    session_id VARCHAR(100) UNIQUE NOT NULL,
    phone_number VARCHAR(20),
    telnyx_call_control_id VARCHAR(100),
    status VARCHAR(20) DEFAULT 'active' CHECK (status IN ('active', 'inactive', 'closed')),
    started_at TIMESTAMPTZ DEFAULT NOW(),
    last_activity_at TIMESTAMPTZ DEFAULT NOW(),
    ended_at TIMESTAMPTZ,
    metadata JSONB DEFAULT '{}',
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_bridge_sessions_status ON genesis_bridge.bridge_sessions (status);
CREATE INDEX IF NOT EXISTS idx_bridge_sessions_session_id ON genesis_bridge.bridge_sessions (session_id);
CREATE INDEX IF NOT EXISTS idx_bridge_sessions_phone ON genesis_bridge.bridge_sessions (phone_number);

DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_trigger 
        WHERE tgname = 'trg_bridge_sessions_updated_at'
        AND tgrelid = 'genesis_bridge.bridge_sessions'::regclass
    ) THEN
        CREATE TRIGGER trg_bridge_sessions_updated_at
        BEFORE UPDATE ON genesis_bridge.bridge_sessions
        FOR EACH ROW
        EXECUTE FUNCTION genesis_bridge.update_updated_at_column();
    END IF;
END $$;

CREATE TABLE IF NOT EXISTS genesis_bridge.audit_log (
    id BIGSERIAL PRIMARY KEY,
    table_name VARCHAR(50) NOT NULL,
    record_id BIGINT,
    operation VARCHAR(10) NOT NULL CHECK (operation IN ('INSERT', 'UPDATE', 'DELETE')),
    old_data JSONB,
    new_data JSONB,
    performed_by VARCHAR(100),
    session_id VARCHAR(100),
    performed_at TIMESTAMPTZ DEFAULT NOW(),
    ip_address INET
);

CREATE INDEX IF NOT EXISTS idx_audit_log_table_record ON genesis_bridge.audit_log (table_name, record_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_performed_at ON genesis_bridge.audit_log (performed_at DESC);
CREATE INDEX IF NOT EXISTS idx_audit_log_operation ON genesis_bridge.audit_log (operation);

CREATE TABLE IF NOT EXISTS genesis_bridge.schema_migrations (
    id SERIAL PRIMARY KEY,
    version VARCHAR(50) UNIQUE NOT NULL,
    applied_at TIMESTAMPTZ DEFAULT NOW(),
    checksum VARCHAR(64),
    execution_time_ms INTEGER
);
"""

    def migrate(self) -> bool:
        """
        Execute migration idempotently.
        Returns True if successful or already applied, False on failure.
        """
        logger.info(f"Starting migration: {MIGRATION_VERSION}")

        try:
            with self.get_connection() as conn:
                # Ensure tracking table exists
                self._ensure_migrations_table(conn)

                # Check if already applied
                if self._is_migration_applied(conn, MIGRATION_VERSION):
                    logger.info(f"Migration {MIGRATION_VERSION} already applied. Skipping.")
                    return True

                # Load and execute migration
                migration_sql = self._load_migration_sql()
                checksum = self._calculate_checksum(migration_sql)

                logger.info(f"Applying migration SQL (checksum: {checksum[:8]}...)")
                # FIX: monotonic() is immune to wall-clock adjustments
                # (NTP steps), unlike the previous time.time().
                start_time = time.monotonic()

                try:
                    with conn.cursor() as cur:
                        cur.execute(migration_sql)

                    execution_time_ms = int((time.monotonic() - start_time) * 1000)
                    # Record in the SAME transaction so the DDL and its
                    # bookkeeping commit (or roll back) atomically.
                    self._record_migration(conn, MIGRATION_VERSION, checksum, execution_time_ms)

                    conn.commit()
                    logger.info(f"Migration {MIGRATION_VERSION} applied successfully in {execution_time_ms}ms")
                    return True

                except psycopg2.Error as e:
                    conn.rollback()
                    logger.error(f"Migration SQL execution failed: {e}")
                    raise MigrationError(f"Migration execution failed: {e}") from e

        except MigrationError as e:
            logger.error(f"Migration failed: {e}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error: {e}", exc_info=True)
            return False

    def verify_schema(self) -> bool:
        """Verify that all expected tables exist in information_schema."""
        logger.info("Verifying schema integrity...")

        # (object kind, schema, name) — kind is informational only; every
        # entry here is checked against information_schema.tables.
        expected_objects = [
            ("table", "genesis_bridge", "command_queue"),
            ("table", "genesis_bridge", "bridge_sessions"),
            ("table", "genesis_bridge", "audit_log"),
            ("table", "genesis_bridge", "schema_migrations"),
        ]

        try:
            with self.get_connection() as conn:
                with conn.cursor() as cur:
                    for obj_type, schema, name in expected_objects:
                        cur.execute("""
                            SELECT 1 FROM information_schema.tables 
                            WHERE table_schema = %s AND table_name = %s;
                        """, (schema, name))

                        if not cur.fetchone():
                            logger.error(f"Missing {obj_type}: {schema}.{name}")
                            return False

                        logger.info(f"Verified {obj_type}: {schema}.{name}")

            logger.info("Schema verification completed successfully")
            return True

        except Exception as e:
            logger.error(f"Schema verification failed: {e}")
            return False

    def close(self) -> None:
        """Close every pooled connection. Call once when done with the runner."""
        if self.connection_pool:
            self.connection_pool.closeall()
            logger.info("Connection pool closed")


def main() -> int:
    """Build the config, run the migration, verify the schema.

    Returns a shell exit code: 0 on success, 1 on failure, 130 on Ctrl-C.
    """
    config = DatabaseConfig(
        host=DB_HOST,
        port=DB_PORT,
        dbname=DB_NAME,
        user=DB_USER,
        password=DB_PASSWORD,
        sslmode=DB_SSL_MODE
    )

    runner = None
    try:
        runner = DatabaseMigrationRunner(config)

        # Apply (or skip) the migration, then confirm the schema exists.
        if not runner.migrate():
            return 1
        if not runner.verify_schema():
            logger.error("Schema verification failed")
            return 1
        return 0

    except KeyboardInterrupt:
        logger.info("Migration interrupted by user")
        return 130
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        return 1
    finally:
        # Runs before any of the returns above — pool is always released.
        if runner:
            runner.close()


# Script entry point: propagate main()'s exit code to the shell.
if __name__ == "__main__":
    sys.exit(main())