#!/usr/bin/env python3
"""
Genesis Memory System Benchmarks
================================
Performance benchmarks for memory operations.

Measures:
- Atomic I/O latency
- PostgreSQL operations (via Elestio)
- Surprise detection latency
- MCP sync operations
- Circuit breaker overhead

Usage:
    python benchmark.py run      # Run all benchmarks
    python benchmark.py quick    # Quick sanity check
    python benchmark.py report   # Show last results
"""

import functools
import json
import shutil
import statistics
import tempfile
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List

# Results path for `run` / `report` commands.
# NOTE(review): hard-coded Windows drive path ("E:/...") — will resolve as a
# relative path on POSIX hosts; confirm the intended deployment target.
RESULTS_PATH = Path("E:/genesis-system/data/benchmarks/baseline.json")


def benchmark(name: str, iterations: int = 100):
    """Decorator that turns a function into a latency benchmark.

    The wrapped function is executed `iterations` times; its own return
    value is discarded and the wrapper instead returns a dict of latency
    statistics in milliseconds: mean/median/min/max/stdev plus p95/p99
    (nearest-rank percentiles over the sorted samples).

    Args:
        name: Label stored under the "name" key of the result dict.
        iterations: Number of timed calls (default 100).
    """
    def decorator(func):
        @functools.wraps(func)  # preserve __name__/__doc__ of the benchmark
        def wrapper(*args, **kwargs):
            times = []
            for _ in range(iterations):
                start = time.perf_counter()
                func(*args, **kwargs)
                times.append((time.perf_counter() - start) * 1000)  # ms

            # Sort once and reuse for min/max and both percentile lookups
            # (previously sorted twice, once per percentile).
            ordered = sorted(times)
            return {
                "name": name,
                "iterations": iterations,
                "mean_ms": statistics.mean(times),
                "median_ms": statistics.median(times),
                "min_ms": ordered[0],
                "max_ms": ordered[-1],
                "stdev_ms": statistics.stdev(times) if len(times) > 1 else 0,
                "p95_ms": ordered[int(len(ordered) * 0.95)],
                "p99_ms": ordered[int(len(ordered) * 0.99)],
            }
        return wrapper
    return decorator


class AtomicIOBenchmarks:
    """Benchmark atomic I/O operations.

    Each timed method imports `atomic_io` locally so a missing module
    fails the call (caught in run_all) rather than module import.
    """

    def __init__(self):
        # Scratch directory for all benchmark files; removed in run_all().
        self.tmpdir = tempfile.mkdtemp()

    @benchmark("atomic_write_small", iterations=100)
    def write_small(self):
        """Write a ~13-byte text file atomically."""
        from atomic_io import atomic_write
        path = Path(self.tmpdir) / "small.txt"
        atomic_write(path, "Hello, World!", backup=False)

    @benchmark("atomic_write_medium", iterations=50)
    def write_medium(self):
        """Write a 10KB text file atomically."""
        from atomic_io import atomic_write
        path = Path(self.tmpdir) / "medium.txt"
        content = "x" * 10000  # 10KB
        atomic_write(path, content, backup=False)

    @benchmark("atomic_json_write", iterations=100)
    def json_write(self):
        """Serialize and atomically write a small JSON payload."""
        from atomic_io import atomic_json_write
        path = Path(self.tmpdir) / "data.json"
        data = {"key": "value", "list": list(range(100))}
        atomic_json_write(path, data, backup=False)

    @benchmark("safe_read", iterations=100)
    def safe_read(self):
        """Read back the file seeded by run_all()."""
        from atomic_io import safe_read
        path = Path(self.tmpdir) / "data.json"
        safe_read(path, default="")

    def run_all(self) -> List[Dict]:
        """Run all atomic-I/O benchmarks and return their stat dicts.

        Errors are logged and swallowed so one failing benchmark does not
        abort the suite; the scratch directory is always removed.
        """
        results = []
        try:
            # Create test file first so safe_read() has something to read.
            from atomic_io import atomic_json_write
            path = Path(self.tmpdir) / "data.json"
            atomic_json_write(path, {"key": "value"}, backup=False)

            results.append(self.write_small())
            results.append(self.write_medium())
            results.append(self.json_write())
            results.append(self.safe_read())
        except Exception as e:
            print(f"[!] AtomicIO benchmark error: {e}")
        finally:
            # Fix: mkdtemp() directories were previously leaked on every run.
            shutil.rmtree(self.tmpdir, ignore_errors=True)
        return results


class PostgresBenchmarks:
    """Benchmark PostgreSQL via Elestio.

    Each timed method opens its own connection, so the reported latency
    includes connection establishment, not just query execution.
    """

    def __init__(self):
        self._setup()

    def _setup(self):
        """Create the benchmark table, closing the connection even on error."""
        import sys
        sys.path.append('/mnt/e/genesis-system/data/genesis-memory')
        from elestio_config import PostgresConfig
        import psycopg2
        self.conn_params = PostgresConfig.get_connection_params()
        conn = psycopg2.connect(**self.conn_params)
        try:
            # `with conn` commits on success / rolls back on error, but does
            # NOT close the connection -- hence the explicit finally.
            with conn, conn.cursor() as cur:
                cur.execute("""
            CREATE TABLE IF NOT EXISTS bm_memories (
                id TEXT PRIMARY KEY,
                content TEXT,
                score REAL
            )
        """)
        finally:
            conn.close()

    @benchmark("postgres_insert", iterations=100)
    def insert(self):
        """Upsert a single row (unique id derived from the clock)."""
        import psycopg2
        conn = psycopg2.connect(**self.conn_params)
        try:
            with conn, conn.cursor() as cur:
                cur.execute(
                    "INSERT INTO bm_memories VALUES (%s, %s, %s) ON CONFLICT (id) DO UPDATE SET content = EXCLUDED.content, score = EXCLUDED.score",
                    (f"mem_{time.time()}", "Test content", 0.5)
                )
        finally:
            conn.close()

    @benchmark("postgres_select", iterations=100)
    def select(self):
        """Fetch up to 10 rows."""
        import psycopg2
        conn = psycopg2.connect(**self.conn_params)
        try:
            with conn, conn.cursor() as cur:
                cur.execute("SELECT * FROM bm_memories LIMIT 10")
                cur.fetchall()
        finally:
            conn.close()

    @benchmark("postgres_ilike_search", iterations=50)
    def ilike_search(self):
        """Case-insensitive substring search over content."""
        import psycopg2
        conn = psycopg2.connect(**self.conn_params)
        try:
            with conn, conn.cursor() as cur:
                cur.execute(
                    "SELECT * FROM bm_memories WHERE content ILIKE %s",
                    ("%test%",)
                )
                cur.fetchall()
        finally:
            conn.close()

    def run_all(self) -> List[Dict]:
        """Run all PostgreSQL benchmarks; errors are logged, not raised."""
        results = []
        try:
            results.append(self.insert())
            results.append(self.select())
            results.append(self.ilike_search())
        except Exception as e:
            print(f"[!] PostgreSQL benchmark error: {e}")
        return results


class SurpriseBenchmarks:
    """Benchmark surprise detection."""

    def __init__(self):
        # Scratch dir for the detector's vector store; removed in run_all().
        self.tmpdir = tempfile.mkdtemp()
        self.detector = None

    def _setup(self):
        """Instantiate the detector and seed it with baseline memories.

        On failure the detector is left as None and run_all() skips the
        benchmarks instead of crashing.
        """
        try:
            from enhanced_surprise import EnhancedSurpriseDetector
            self.detector = EnhancedSurpriseDetector(
                memory_path=f"{self.tmpdir}/vectors.json"
            )
            # Add some baseline memories
            for i in range(10):
                self.detector.add_to_memory(
                    f"Test memory content number {i}",
                    "benchmark",
                    "test"
                )
        except Exception as e:
            print(f"[!] Surprise setup failed: {e}")
            self.detector = None

    @benchmark("surprise_evaluate", iterations=20)
    def evaluate(self):
        """Score one piece of new content against stored memories."""
        # Guard: if setup failed this times a no-op rather than raising.
        if self.detector:
            self.detector.evaluate(
                "This is a new piece of content to evaluate",
                "benchmark",
                "test"
            )

    @benchmark("surprise_add_memory", iterations=20)
    def add_memory(self):
        """Append one unique memory to the detector's store."""
        if self.detector:
            self.detector.add_to_memory(
                f"New memory at {time.time()}",
                "benchmark",
                "test"
            )

    def run_all(self) -> List[Dict]:
        """Run surprise benchmarks; the scratch directory is always removed."""
        self._setup()
        results = []
        try:
            if self.detector:
                try:
                    results.append(self.evaluate())
                    results.append(self.add_memory())
                except Exception as e:
                    print(f"[!] Surprise benchmark error: {e}")
            else:
                print("[!] Surprise detector not available, skipping benchmarks")
        finally:
            # Fix: mkdtemp() directory was previously leaked on every run.
            shutil.rmtree(self.tmpdir, ignore_errors=True)
        return results


class CircuitBreakerBenchmarks:
    """Benchmark circuit breaker overhead.

    NOTE(review): each timed call constructs a fresh CircuitBreaker, so
    these figures include construction cost on top of the operation itself.
    """

    @benchmark("circuit_breaker_check", iterations=1000)
    def check_available(self):
        """Time an availability check on a freshly built breaker."""
        from circuit_breaker import CircuitBreaker
        breaker = CircuitBreaker("test", failure_threshold=5)
        _ = breaker.is_available

    @benchmark("circuit_breaker_record_success", iterations=1000)
    def record_success(self):
        """Time recording one success on a freshly built breaker."""
        from circuit_breaker import CircuitBreaker
        breaker = CircuitBreaker("test", failure_threshold=5)
        breaker.record_success()

    def run_all(self) -> List[Dict]:
        """Run both breaker benchmarks, logging (not raising) any error."""
        results: List[Dict] = []
        try:
            for bench in (self.check_available, self.record_success):
                results.append(bench())
        except Exception as e:
            print(f"[!] Circuit breaker benchmark error: {e}")
        return results


def run_all_benchmarks() -> Dict[str, Any]:
    """Run all benchmark suites, print a report, and persist results.

    Returns:
        {"timestamp": ISO string, "benchmarks": {suite_key: [stat dicts]}}.

    A suite whose *constructor* fails (previously only PostgresBenchmarks
    could do this, crashing the whole run when the DB config was
    unavailable) is now logged and recorded as an empty list so the
    remaining suites still execute.
    """
    print("=" * 60)
    print("Genesis Memory System Benchmarks")
    print("=" * 60)

    results = {
        "timestamp": datetime.now().isoformat(),
        "benchmarks": {}
    }

    # (results key, section header, suite class) -- run in order.
    suites = [
        ("atomic_io", "[1/4] Atomic I/O Benchmarks...", AtomicIOBenchmarks),
        ("postgresql", "[2/4] PostgreSQL (Elestio) Benchmarks...", PostgresBenchmarks),
        ("surprise", "[3/4] Surprise Detection Benchmarks...", SurpriseBenchmarks),
        ("circuit_breaker", "[4/4] Circuit Breaker Benchmarks...", CircuitBreakerBenchmarks),
    ]
    for key, header, suite_cls in suites:
        print(f"\n{header}")
        try:
            # Guard construction too: suite __init__ may touch external
            # services (e.g. PostgresBenchmarks connects during setup).
            suite_results = suite_cls().run_all()
        except Exception as e:
            print(f"[!] {key} benchmark error: {e}")
            suite_results = []
        results["benchmarks"][key] = suite_results
        for r in suite_results:
            print(f"  {r['name']}: {r['mean_ms']:.3f}ms (p95: {r['p95_ms']:.3f}ms)")

    # Summary
    print("\n" + "=" * 60)
    print("Summary")
    print("=" * 60)

    total_benchmarks = sum(len(v) for v in results["benchmarks"].values())
    print(f"Total benchmarks run: {total_benchmarks}")

    # Save results (best-effort: a write failure shouldn't lose the report).
    try:
        RESULTS_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(RESULTS_PATH, 'w') as f:
            json.dump(results, f, indent=2)
        print(f"\nResults saved to: {RESULTS_PATH}")
    except Exception as e:
        print(f"Failed to save results: {e}")

    return results


def run_quick_check() -> bool:
    """Quick sanity check that all memory-system modules import cleanly.

    Returns True only when every module (and the names the rest of this
    script pulls from it) is importable.
    """
    import importlib

    print("Quick module check...")

    # (module name, attributes the rest of the system imports from it)
    targets = [
        ("atomic_io", ("atomic_write", "safe_read")),
        ("circuit_breaker", ("CircuitBreaker",)),
        ("retry_utils", ("retry",)),
        ("enhanced_surprise", ("EnhancedSurpriseDetector",)),
        ("reliable_mcp_sync", ("MCPSyncManager",)),
        ("logging_config", ("get_logger",)),
        ("metrics", ("GenesisMetrics",)),
    ]

    checks = []
    for mod_name, attrs in targets:
        try:
            module = importlib.import_module(mod_name)
            for attr in attrs:
                getattr(module, attr)
            checks.append((mod_name, True))
        except Exception as e:
            checks.append((mod_name, False))
            print(f"  [!] {mod_name}: {e}")

    print("\nResults:")
    for name, ok in checks:
        status = "[OK]" if ok else "[FAIL]"
        print(f"  {status} {name}")

    passed = sum(1 for _, ok in checks if ok)
    total = len(checks)
    print(f"\n{passed}/{total} modules loaded successfully")

    return passed == total


if __name__ == "__main__":
    import sys

    # CLI dispatch: run | quick | report (no args prints usage).
    args = sys.argv[1:]
    if not args:
        print("Genesis Memory System Benchmarks")
        print("Usage: python benchmark.py [run|quick|report]")
    else:
        cmd = args[0]
        if cmd == "run":
            run_all_benchmarks()
        elif cmd == "quick":
            # Exit status mirrors the check so CI can gate on it.
            sys.exit(0 if run_quick_check() else 1)
        elif cmd == "report":
            if not RESULTS_PATH.exists():
                print("No benchmark results found. Run: python benchmark.py run")
            else:
                with open(RESULTS_PATH) as f:
                    stored = json.load(f)
                print(json.dumps(stored, indent=2))
        else:
            print(f"Unknown command: {cmd}")
            print("Usage: python benchmark.py [run|quick|report]")
