#!/usr/bin/env python3
"""
MiniMax M2.5 Connectivity Test Script
Tests NVIDIA NIM free API access to MiniMax models.

URGENT: Free credits expire Feb 19, 2026 (4 days)

Usage:
    python test_minimax.py

Requirements:
    pip install openai requests python-dotenv

Environment Variables:
    NVIDIA_NIM_API_KEY - Your NVIDIA NIM API key (get from build.nvidia.com)
"""

import json
import os
import sys
from datetime import datetime
from typing import Dict, List, Optional

# Add Genesis system path
sys.path.append('/mnt/e/genesis-system')

try:
    import openai
    import requests
except ImportError:
    print("ERROR: Missing dependencies. Install with:")
    print("  pip install openai requests python-dotenv")
    sys.exit(1)

# Try to load from .env file
try:
    from dotenv import load_dotenv
    load_dotenv('/mnt/e/genesis-system/.env')
except ImportError:
    pass

# Configuration
NVIDIA_NIM_API_KEY = os.getenv('NVIDIA_NIM_API_KEY')
# NVIDIA NIM's OpenAI-compatible gateway lives on the "integrate" host.
# "https://api.nvidia.com/v1" is not the chat-completions endpoint and every
# request against it fails; see the build.nvidia.com API reference.
NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1"

# Accumulated results for the whole run; serialized to disk by save_results().
TEST_RESULTS = {
    "timestamp": datetime.now().isoformat(),
    "tests": []
}


def log_test(test_name: str, status: str, details: Optional[Dict] = None):
    """Record one test result in TEST_RESULTS and echo it to the console.

    Args:
        test_name: Human-readable name of the test.
        status: One of "PASS", "FAIL", "SKIP", or "WARN".
        details: Optional extra key/value context; stored (empty dict when
            omitted) and printed one line per item.
    """
    result = {
        "test": test_name,
        "status": status,
        "timestamp": datetime.now().isoformat(),
        "details": details or {}
    }
    TEST_RESULTS["tests"].append(result)

    # Console echo: check mark for PASS, cross for FAIL, warning otherwise.
    status_icon = "✅" if status == "PASS" else "❌" if status == "FAIL" else "⚠️"
    print(f"{status_icon} {test_name}: {status}")
    if details:
        for key, value in details.items():
            print(f"   {key}: {value}")
    print()


def test_api_key():
    """Test 1: Verify API key is configured."""
    banner = "=" * 80
    print(banner)
    print("TEST 1: API Key Configuration")
    print(banner)

    # Happy path first: key present -> log a PASS with a redacted preview.
    if NVIDIA_NIM_API_KEY:
        log_test(
            "API Key Check",
            "PASS",
            {"key_length": len(NVIDIA_NIM_API_KEY), "key_prefix": NVIDIA_NIM_API_KEY[:10] + "..."}
        )
        return True

    # Missing key: record a FAIL with setup instructions for the operator.
    log_test(
        "API Key Check",
        "FAIL",
        {
            "error": "NVIDIA_NIM_API_KEY not found in environment",
            "instructions": "Get key from https://build.nvidia.com",
            "setup": "export NVIDIA_NIM_API_KEY='your_key_here'"
        }
    )
    return False


def test_minimax_m21_basic():
    """Test 2: Basic text generation with MiniMax M2.1.

    Sends a short coding prompt and logs token usage plus a preview of the
    response. Returns True on success, False on failure or when the API key
    is not configured.
    """
    print("=" * 80)
    print("TEST 2: MiniMax M2.1 Basic Generation")
    print("=" * 80)

    if not NVIDIA_NIM_API_KEY:
        log_test("MiniMax M2.1 Basic", "SKIP", {"reason": "API key not configured"})
        return False

    try:
        client = openai.OpenAI(
            base_url=NVIDIA_BASE_URL,
            api_key=NVIDIA_NIM_API_KEY
        )

        prompt = "Write a simple Python function to check if a number is prime."

        print(f"Prompt: {prompt}")
        print("Calling API...")

        response = client.chat.completions.create(
            model="minimaxai/minimax-m2.1",
            messages=[
                {"role": "system", "content": "You are a helpful coding assistant."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.7,
            max_tokens=512
        )

        # content may legitimately be None (e.g. a non-text completion);
        # normalize so len()/slicing below cannot raise TypeError.
        result = response.choices[0].message.content or ""
        print(f"Response:\n{result}\n")

        # The SDK response object has a `usage` attribute that can be None,
        # so hasattr() alone is not enough: it would still dereference
        # .total_tokens on None and raise AttributeError.
        usage = getattr(response, 'usage', None)

        log_test(
            "MiniMax M2.1 Basic",
            "PASS",
            {
                "model": "minimaxai/minimax-m2.1",
                "tokens_used": usage.total_tokens if usage else "N/A",
                "response_length": len(result),
                # Only append an ellipsis when the preview actually truncates.
                "response_preview": result[:100] + ("..." if len(result) > 100 else "")
            }
        )
        return True

    except Exception as e:
        log_test(
            "MiniMax M2.1 Basic",
            "FAIL",
            {"error": str(e), "error_type": type(e).__name__}
        )
        return False


def test_minimax_m2_basic():
    """Test 3: Basic text generation with MiniMax M2.

    Asks for a two-sentence explanation and logs token usage plus the full
    response. Returns True on success, False on failure or when the API key
    is not configured.
    """
    print("=" * 80)
    print("TEST 3: MiniMax M2 Basic Generation")
    print("=" * 80)

    if not NVIDIA_NIM_API_KEY:
        log_test("MiniMax M2 Basic", "SKIP", {"reason": "API key not configured"})
        return False

    try:
        client = openai.OpenAI(
            base_url=NVIDIA_BASE_URL,
            api_key=NVIDIA_NIM_API_KEY
        )

        prompt = "Explain async/await in Python in 2 sentences."

        print(f"Prompt: {prompt}")
        print("Calling API...")

        response = client.chat.completions.create(
            model="minimaxai/minimax-m2",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.7,
            max_tokens=256
        )

        # content may be None for non-text completions; normalize so len()
        # below cannot raise TypeError.
        result = response.choices[0].message.content or ""
        print(f"Response:\n{result}\n")

        # `usage` can exist but be None; hasattr() alone would still
        # dereference .total_tokens on None and raise AttributeError.
        usage = getattr(response, 'usage', None)

        log_test(
            "MiniMax M2 Basic",
            "PASS",
            {
                "model": "minimaxai/minimax-m2",
                "tokens_used": usage.total_tokens if usage else "N/A",
                "response_length": len(result),
                "response": result
            }
        )
        return True

    except Exception as e:
        log_test(
            "MiniMax M2 Basic",
            "FAIL",
            {"error": str(e), "error_type": type(e).__name__}
        )
        return False


def test_coding_capability():
    """Test 4: Coding capability (SWE-Bench style task).

    Requests an LRU-cache implementation and sanity-checks the response for
    the expected class and method definitions. Returns True when the call
    succeeds (PASS or WARN), False on failure or missing API key.
    """
    print("=" * 80)
    print("TEST 4: Coding Capability (SWE-Bench Style)")
    print("=" * 80)

    if not NVIDIA_NIM_API_KEY:
        log_test("Coding Capability", "SKIP", {"reason": "API key not configured"})
        return False

    try:
        client = openai.OpenAI(
            base_url=NVIDIA_BASE_URL,
            api_key=NVIDIA_NIM_API_KEY
        )

        prompt = """Write a Python function that implements a LRU cache with the following requirements:
- Max capacity of N items
- get(key) returns value or None
- put(key, value) adds item, evicts least recently used if at capacity
- Uses OrderedDict for O(1) operations"""

        print(f"Prompt: {prompt[:100]}...")
        print("Calling API...")

        response = client.chat.completions.create(
            model="minimaxai/minimax-m2.1",
            messages=[
                {"role": "system", "content": "You are an expert Python engineer. Write clean, efficient code."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
            max_tokens=1024
        )

        # content may be None for non-text completions; normalize so the
        # substring checks and len() below cannot raise TypeError.
        result = response.choices[0].message.content or ""
        print(f"Response:\n{result}\n")

        # Basic structural validation of the generated code (not executed).
        has_class = "class" in result.lower()
        has_get = "def get" in result.lower()
        has_put = "def put" in result.lower()

        # `usage` can exist but be None; hasattr() alone would still
        # dereference .total_tokens on None and raise AttributeError.
        usage = getattr(response, 'usage', None)

        log_test(
            "Coding Capability",
            "PASS" if (has_class and has_get and has_put) else "WARN",
            {
                "model": "minimaxai/minimax-m2.1",
                "tokens_used": usage.total_tokens if usage else "N/A",
                "contains_class": has_class,
                "contains_get": has_get,
                "contains_put": has_put,
                "response_length": len(result)
            }
        )
        return True

    except Exception as e:
        log_test(
            "Coding Capability",
            "FAIL",
            {"error": str(e), "error_type": type(e).__name__}
        )
        return False


def test_rate_limits():
    """Test 5: Rate limit check (40 RPM for free tier)."""
    banner = "=" * 80
    print(banner)
    print("TEST 5: Rate Limit Check (5 rapid requests)")
    print(banner)

    if not NVIDIA_NIM_API_KEY:
        log_test("Rate Limit Check", "SKIP", {"reason": "API key not configured"})
        return False

    try:
        client = openai.OpenAI(
            base_url=NVIDIA_BASE_URL,
            api_key=NVIDIA_NIM_API_KEY
        )

        # One boolean per request: True on success, False on failure.
        outcomes = []
        started = datetime.now()

        for attempt in range(1, 6):
            print(f"Request {attempt}/5...", end=" ")
            try:
                client.chat.completions.create(
                    model="minimaxai/minimax-m2.1",
                    messages=[{"role": "user", "content": f"Test {attempt}"}],
                    max_tokens=50
                )
            except Exception as e:
                print(f"❌ {str(e)[:50]}")
                outcomes.append(False)
            else:
                print("✅")
                outcomes.append(True)

        duration = (datetime.now() - started).total_seconds()
        successes = sum(outcomes)
        failures = len(outcomes) - successes
        within_limits = successes >= 4

        log_test(
            "Rate Limit Check",
            "PASS" if within_limits else "WARN",
            {
                "total_requests": 5,
                "successes": successes,
                "failures": failures,
                "duration_seconds": duration,
                "free_tier_limit": "40 RPM",
                "status": "Within limits" if within_limits else "May be rate limited"
            }
        )
        return True

    except Exception as e:
        log_test(
            "Rate Limit Check",
            "FAIL",
            {"error": str(e), "error_type": type(e).__name__}
        )
        return False


def test_comparison_gemini():
    """Test 6: Quality comparison (MiniMax vs Gemini-style task).

    Asks an architecture-design question relevant to the Genesis platform
    and sanity-checks the answer for service/architecture/communication
    vocabulary. Returns True when the call succeeds, False otherwise.
    """
    print("=" * 80)
    print("TEST 6: Quality Comparison (Architecture Question)")
    print("=" * 80)

    if not NVIDIA_NIM_API_KEY:
        log_test("Quality Comparison", "SKIP", {"reason": "API key not configured"})
        return False

    try:
        client = openai.OpenAI(
            base_url=NVIDIA_BASE_URL,
            api_key=NVIDIA_NIM_API_KEY
        )

        prompt = """Design a microservices architecture for an AI voice assistant platform that needs to handle:
1. Real-time voice calls (Telnyx)
2. LLM orchestration (multiple models)
3. Memory persistence (PostgreSQL, Qdrant, Redis)
4. Workflow automation (n8n)

Provide: 3 core services, their responsibilities, and how they communicate."""

        print(f"Prompt: {prompt[:100]}...")
        print("Calling API...")

        response = client.chat.completions.create(
            model="minimaxai/minimax-m2.1",
            messages=[
                {"role": "system", "content": "You are an expert solutions architect."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.7,
            max_tokens=1024
        )

        # content may be None for non-text completions; normalize so the
        # quality checks and len() below cannot raise TypeError.
        result = response.choices[0].message.content or ""
        print(f"Response:\n{result}\n")

        # Basic quality checks. Counting case-insensitively so that mixed
        # "Service"/"service" mentions add up (counting each casing
        # separately missed e.g. two of one plus one of the other).
        has_services = result.lower().count("service") >= 3
        has_architecture = "architecture" in result.lower()
        has_communication = "communication" in result.lower() or "api" in result.lower()

        # `usage` can exist but be None; hasattr() alone would still
        # dereference .total_tokens on None and raise AttributeError.
        usage = getattr(response, 'usage', None)

        log_test(
            "Quality Comparison",
            "PASS" if (has_services and has_architecture) else "WARN",
            {
                "model": "minimaxai/minimax-m2.1",
                "tokens_used": usage.total_tokens if usage else "N/A",
                "mentions_services": has_services,
                "mentions_architecture": has_architecture,
                "mentions_communication": has_communication,
                "response_length": len(result),
                "assessment": "Genesis-relevant architecture response"
            }
        )
        return True

    except Exception as e:
        log_test(
            "Quality Comparison",
            "FAIL",
            {"error": str(e), "error_type": type(e).__name__}
        )
        return False


def save_results():
    """Save test results to file."""
    output_file = "/mnt/e/genesis-system/docs/MINIMAX_TEST_RESULTS.json"

    # Tally outcomes by status in a single pass over the recorded tests.
    tally = {"PASS": 0, "FAIL": 0, "SKIP": 0, "WARN": 0}
    for entry in TEST_RESULTS["tests"]:
        status = entry["status"]
        if status in tally:
            tally[status] += 1

    total_tests = len(TEST_RESULTS["tests"])
    passed = tally["PASS"]
    failed = tally["FAIL"]
    skipped = tally["SKIP"]
    warned = tally["WARN"]

    # Attach the summary before serializing so it lands in the JSON file.
    TEST_RESULTS["summary"] = {
        "total_tests": total_tests,
        "passed": passed,
        "failed": failed,
        "skipped": skipped,
        "warned": warned,
        "success_rate": f"{(passed/total_tests*100):.1f}%" if total_tests > 0 else "0%"
    }

    with open(output_file, "w") as f:
        json.dump(TEST_RESULTS, f, indent=2)

    print("=" * 80)
    print("TEST SUMMARY")
    print("=" * 80)
    print(f"Total Tests: {total_tests}")
    print(f"✅ Passed: {passed}")
    print(f"❌ Failed: {failed}")
    print(f"⚠️  Warned: {warned}")
    print(f"⏭️  Skipped: {skipped}")
    print(f"Success Rate: {TEST_RESULTS['summary']['success_rate']}")
    print(f"\nResults saved to: {output_file}")
    print("=" * 80)


def main():
    """Run all tests.

    Executes the six connectivity tests in order, persists the aggregated
    results via save_results(), then prints next-step recommendations based
    on how many tests passed.
    """
    print("\n" + "=" * 80)
    print("MINIMAX M2.5 CONNECTIVITY TEST SUITE")
    print("=" * 80)
    print(f"Timestamp: {datetime.now().isoformat()}")
    print(f"URGENT: Free credits expire Feb 19, 2026 (4 days)")
    print("=" * 80)
    print()

    # Run tests in sequence
    tests = [
        test_api_key,
        test_minimax_m21_basic,
        test_minimax_m2_basic,
        test_coding_capability,
        test_rate_limits,
        test_comparison_gemini
    ]

    for test_func in tests:
        try:
            test_func()
        except Exception as e:
            # Each test catches its own API errors internally; this guard only
            # fires on unexpected bugs in the harness itself and logs a FAIL
            # so the remaining tests still run.
            print(f"CRITICAL ERROR in {test_func.__name__}: {e}")
            log_test(
                test_func.__name__,
                "FAIL",
                {"critical_error": str(e)}
            )

    # Save results
    save_results()

    # Recommendations
    print("\nRECOMMENDATIONS:")
    print("=" * 80)

    # Thresholds: 4+ passes = fully working, 2-3 = partial, fewer = broken.
    passed_count = len([t for t in TEST_RESULTS["tests"] if t["status"] == "PASS"])

    if passed_count >= 4:
        print("✅ MiniMax API is WORKING via NVIDIA NIM")
        print("✅ NEXT STEPS:")
        print("   1. Integrate MiniMax M2.1 into Genesis Execution Layer")
        print("   2. Add to core/gemini_rate_maximizer.py for coding tasks")
        print("   3. Test with real Genesis atomic stories")
        print("   4. Compare quality vs Gemini Flash for code generation")
        print("   5. Set up Kilo.ai account before Feb 19 for M2.5 access")
    elif passed_count >= 2:
        print("⚠️  MiniMax API PARTIALLY WORKING")
        print("⚠️  NEXT STEPS:")
        print("   1. Review failed tests above")
        print("   2. Check NVIDIA NIM API key validity")
        print("   3. Verify account has free credits remaining")
        print("   4. Retry failed tests individually")
    else:
        print("❌ MiniMax API NOT WORKING")
        print("❌ TROUBLESHOOTING:")
        print("   1. Get API key from https://build.nvidia.com")
        print("   2. Set environment variable: export NVIDIA_NIM_API_KEY='your_key'")
        print("   3. Check free credits balance on NVIDIA NIM dashboard")
        print("   4. Verify network connectivity to api.nvidia.com")

    print("=" * 80)
    print(f"\nFull results: /mnt/e/genesis-system/docs/MINIMAX_TEST_RESULTS.json")
    print(f"Activation guide: /mnt/e/genesis-system/docs/MINIMAX_M25_ACTIVATION.md")
    print()


if __name__ == "__main__":
    main()
