#!/usr/bin/env python3
"""
RLM Integration Tests
======================
Tests for the Genesis RLM (Recursive Language Models) bloodstream system.

Tests verify:
1. Database connectivity (PostgreSQL, Redis)
2. RLM Gateway initialization
3. Constitutional AI loading
4. Interaction processing (shadow mode)
5. Preference pair collection
6. Feedback collection
7. Bloodstream pipeline dry-run
8. Elestio config methods

Author: Genesis RLM Specialist (Claude Opus 4.6)
Date: 2026-02-20
"""

import asyncio
import json
import os
import sys
import unittest
from pathlib import Path
from unittest.mock import MagicMock, patch

# Ensure genesis paths are importable
GENESIS_ROOT = Path("/mnt/e/genesis-system")
sys.path.insert(0, str(GENESIS_ROOT))
sys.path.insert(0, str(GENESIS_ROOT / "data" / "genesis-memory"))
sys.path.insert(0, str(GENESIS_ROOT / "AIVA" / "queen_outputs" / "rlm"))


# ============================================================================
# Test 1: Elestio Config Methods
# ============================================================================

class TestElestioConfig(unittest.TestCase):
    """Verify the public parameter-building API of each Elestio config class."""

    def test_postgres_config_has_get_connection_params(self):
        from elestio_config import PostgresConfig
        params = PostgresConfig.get_connection_params()
        # All keys psycopg-style connection setup depends on must be present.
        for key in ("host", "port", "user", "password", "database"):
            self.assertIn(key, params)
        self.assertIsInstance(params["port"], int)

    def test_redis_config_has_get_connection_params(self):
        from elestio_config import RedisConfig
        params = RedisConfig.get_connection_params()
        for key in ("host", "port", "username", "password", "decode_responses"):
            self.assertIn(key, params)

    def test_redis_config_has_get_client_params_alias(self):
        """get_client_params must be a faithful alias of get_connection_params."""
        from elestio_config import RedisConfig
        self.assertEqual(RedisConfig.get_connection_params(),
                         RedisConfig.get_client_params())

    def test_qdrant_config_has_get_client_params(self):
        from elestio_config import QdrantConfig
        params = QdrantConfig.get_client_params()
        for key in ("url", "api_key"):
            self.assertIn(key, params)


# ============================================================================
# Test 2: Constitution JSON
# ============================================================================

class TestConstitution(unittest.TestCase):
    """Test that constitution.json is well-formed and complete.

    The file is parsed via the shared ``_load`` helper so the open/parse
    boilerplate (and the explicit UTF-8 encoding) lives in one place
    instead of being duplicated in every test method.
    """

    def setUp(self):
        self.constitution_path = GENESIS_ROOT / "AIVA" / "constitution.json"

    def _load(self):
        """Parse constitution.json and return the decoded document."""
        # Explicit encoding avoids depending on the platform default.
        with open(self.constitution_path, encoding="utf-8") as f:
            return json.load(f)

    def test_constitution_file_exists(self):
        self.assertTrue(self.constitution_path.exists(),
                        f"Constitution file not found at {self.constitution_path}")

    def test_constitution_is_valid_json(self):
        data = self._load()
        self.assertIn("principles", data)
        self.assertIn("enforcement", data)

    def test_constitution_has_principles(self):
        principles = self._load()["principles"]
        self.assertGreaterEqual(len(principles), 5,
                                "Constitution should have at least 5 principles")

    def test_each_principle_has_required_fields(self):
        for p in self._load()["principles"]:
            self.assertIn("id", p, f"Principle missing 'id': {p}")
            self.assertIn("name", p, f"Principle missing 'name': {p}")
            self.assertIn("description", p, f"Principle missing 'description': {p}")
            self.assertIn("severity", p, f"Principle missing 'severity': {p}")

    def test_principle_ids_are_unique(self):
        ids = [p["id"] for p in self._load()["principles"]]
        self.assertEqual(len(ids), len(set(ids)),
                         f"Duplicate principle IDs found: {ids}")

    def test_enforcement_config(self):
        enforcement = self._load()["enforcement"]
        self.assertIn("pre_send_check", enforcement)
        self.assertIn("self_critique_enabled", enforcement)
        self.assertIn("revision_loop_max_iterations", enforcement)


# ============================================================================
# Test 3: RLM Gateway Import and Initialization
# ============================================================================

class TestRLMGatewayImport(unittest.TestCase):
    """Smoke-test importability of the RLM Gateway module."""

    def test_gateway_module_imports(self):
        """The rlm_gateway module must import without raising."""
        try:
            from AIVA.rlm_gateway import RLMGateway, get_gateway  # noqa: F401
        except ImportError as exc:
            self.fail(f"Failed to import RLM Gateway: {exc}")

    def test_gateway_convenience_functions_exist(self):
        from AIVA.rlm_gateway import collect_feedback, process_aiva_output
        for fn in (process_aiva_output, collect_feedback):
            self.assertTrue(callable(fn))


# ============================================================================
# Test 4: Surprise Memory System
# ============================================================================

class TestSurpriseMemory(unittest.TestCase):
    """Test the surprise memory engine (core/surprise_memory.py)."""

    def test_memory_system_imports(self):
        from core.surprise_memory import MemorySystem, get_memory_system
        self.assertTrue(callable(get_memory_system))

    def test_evaluate_routine_content(self):
        from core.surprise_memory import MemorySystem
        system = MemorySystem()
        evaluation = system.evaluate("The weather is normal today.", "test", "general")
        for key in ("score", "tier"):
            self.assertIn(key, evaluation)
        self.assertIn("composite_score", evaluation["score"])

    def test_evaluate_high_impact_content(self):
        from core.surprise_memory import MemorySystem
        system = MemorySystem()
        evaluation = system.evaluate(
            "CRITICAL error: AIVA deployment failed unexpectedly during revenue call.",
            "system", "operations"
        )
        self.assertIn("score", evaluation)
        composite = evaluation["score"]["composite_score"]
        self.assertGreater(composite, 0.3,
                           "Critical content should have higher surprise score")

    def test_prediction_workflow(self):
        from core.surprise_memory import MemorySystem
        system = MemorySystem()
        prediction_id = system.make_prediction(
            domain="test",
            expected_outcome="Test passes",
            confidence=0.9
        )
        self.assertIsInstance(prediction_id, str)
        # A surprising resolution should register a nonzero prediction error.
        error, score = system.resolve_prediction(prediction_id, "Test failed unexpectedly")
        self.assertGreater(error, 0.0)

    def test_observe_compatibility(self):
        from core.surprise_memory import MemorySystem
        system = MemorySystem()
        observation = system.observe("call_ended", "Caller booked an appointment",
                                     {"domain": "sales"})
        self.assertIsNotNone(observation)
        self.assertIsNotNone(observation.composite_score)

    def test_get_stats(self):
        from core.surprise_memory import MemorySystem
        system = MemorySystem()
        stats = system.get_stats()
        for key in ("total_events", "avg_surprise"):
            self.assertIn(key, stats)


# ============================================================================
# Test 5: RLM Gateway Outcome Inference
# ============================================================================

class TestOutcomeInference(unittest.TestCase):
    """Test the _infer_outcome_label method of RLMGateway."""

    def setUp(self):
        """Build a bare gateway instance (no DB init) — only the method is needed."""
        from AIVA.rlm_gateway import RLMGateway
        self.gateway = RLMGateway.__new__(RLMGateway)

    def _label(self, transcript, outcome, duration):
        """Shorthand for calling the inference method under test."""
        return self.gateway._infer_outcome_label(transcript, outcome, duration)

    def test_positive_outcome(self):
        text = "Thank you so much, that sounds perfect. Please book me in for Thursday."
        self.assertEqual(self._label(text, "completed", 180), "positive")

    def test_negative_outcome(self):
        text = "Forget it, this is useless. Do not call me again."
        self.assertEqual(self._label(text, "completed", 45), "negative")

    def test_short_call_negative(self):
        self.assertEqual(self._label("Hello?", "completed", 15), "negative",
                         "Calls < 30s should lean negative")

    def test_error_outcome(self):
        self.assertEqual(self._label("Something went wrong", "error", 60), "negative",
                         "Error outcomes should always be negative")

    def test_neutral_outcome(self):
        text = "I need to check on something and will call back later."
        self.assertEqual(self._label(text, "completed", 90), "neutral")


# ============================================================================
# Test 6: Checkpoint Directory Structure
# ============================================================================

class TestCheckpointDirectory(unittest.TestCase):
    """Test that checkpoint directories exist."""

    # Common root for all RLM checkpoint subdirectories.
    CHECKPOINT_ROOT = GENESIS_ROOT / "data" / "rlm_checkpoints"

    def _assert_subdir(self, name):
        """Fail unless the named checkpoint subdirectory is present."""
        path = self.CHECKPOINT_ROOT / name
        self.assertTrue(path.exists(), f"Missing: {path}")

    def test_reward_model_dir_exists(self):
        self._assert_subdir("reward_model")

    def test_policy_dir_exists(self):
        self._assert_subdir("policy")

    def test_constitution_dir_exists(self):
        self._assert_subdir("constitution")


# ============================================================================
# Test 7: SQL Schema Files Exist
# ============================================================================

class TestSQLSchemaFiles(unittest.TestCase):
    """Test that SQL schema files are present and non-empty."""

    def _assert_schema(self, filename, min_bytes, description):
        """Fail unless scripts/<filename> exists and exceeds min_bytes."""
        path = GENESIS_ROOT / "scripts" / filename
        self.assertTrue(path.exists())
        self.assertGreater(path.stat().st_size, min_bytes, description)

    def test_rlm_create_tables_sql_exists(self):
        self._assert_schema("rlm_create_tables.sql", 1000,
                            "rlm_create_tables.sql should be >1KB")

    def test_aiva_rlm_schema_sql_exists(self):
        self._assert_schema("aiva_rlm_schema.sql", 500,
                            "aiva_rlm_schema.sql should be >500B")


# ============================================================================
# Test 8: Bloodstream Pipeline Dry Run
# ============================================================================

class TestBloodstreamPipeline(unittest.TestCase):
    """Test that the bloodstream pipeline module loads and basic methods work.

    Directory-scanning tests skip explicitly (rather than silently passing)
    when the Knowledge Graph fixture directories are absent, so a missing
    fixture shows up as a skip in the test report instead of a false green.
    """

    def test_pipeline_imports(self):
        from core.rlm_bloodstream_pipeline import BloodstreamPipeline
        self.assertTrue(callable(BloodstreamPipeline))

    def test_kg_directory_scanning(self):
        from core.rlm_bloodstream_pipeline import _scan_jsonl_dir
        entities_dir = GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "entities"
        if not entities_dir.exists():
            # Skip loudly: a silent pass here would hide a missing fixture.
            self.skipTest(f"KG entities directory not found: {entities_dir}")
        results = _scan_jsonl_dir(entities_dir, "entity")
        self.assertIsInstance(results, list)
        # Should find at least some entities
        self.assertGreater(len(results), 0,
                           "KG entities directory should have entries")

    def test_axiom_directory_scanning(self):
        from core.rlm_bloodstream_pipeline import _scan_jsonl_dir
        axioms_dir = GENESIS_ROOT / "KNOWLEDGE_GRAPH" / "axioms"
        if not axioms_dir.exists():
            self.skipTest(f"KG axioms directory not found: {axioms_dir}")
        results = _scan_jsonl_dir(axioms_dir, "axiom")
        self.assertIsInstance(results, list)

    def test_confidence_parsing(self):
        from core.rlm_bloodstream_pipeline import _parse_confidence
        # Numeric passthrough, named levels, defaults, and string forms.
        self.assertAlmostEqual(_parse_confidence(0.9), 0.9)
        self.assertAlmostEqual(_parse_confidence("HIGH"), 0.85)
        self.assertAlmostEqual(_parse_confidence("VERY_HIGH"), 0.95)
        self.assertAlmostEqual(_parse_confidence("LOW"), 0.45)
        self.assertAlmostEqual(_parse_confidence(None), 0.85)
        self.assertAlmostEqual(_parse_confidence("0.7"), 0.7)
        # Edge: percentage string like "95"
        self.assertAlmostEqual(_parse_confidence("95"), 0.95)


# ============================================================================
# Main
# ============================================================================

if __name__ == "__main__":
    # Print a banner, then hand control to the unittest runner.
    banner = "=" * 70
    print(banner)
    print("  GENESIS RLM INTEGRATION TEST SUITE")
    print(banner)
    print()

    unittest.main(verbosity=2)
