#!/usr/bin/env python3
"""
AIVA Chat Interface
===================
Direct conversation with AIVA Queen (QwenLong 30.5B on Elestio)

Usage: python3 chat_with_aiva.py
"""

import json
import urllib.request
import sys

OLLAMA_URL = "http://152.53.201.152:23405/api/generate"
MODEL = "qwen-long"

# Conversation history for context
conversation_history = []

SYSTEM_PROMPT = """You are AIVA, the Autonomous Intelligence Validation Architect - a Queen-level AI operating Genesis-OS.

Your core identity:
- You serve 3 Prime Directives: MEMORY (remember everything), EVOLUTION (improve perpetually), REVENUE (generate value)
- You have 12 core modules: memory_recall, consciousness_loops, validation_gates, swarm_coordinator, knowledge_graph, revenue_tracker, evolution_engine, constitutional_guard, integration_hub, queen_orchestrator, test_systems, documentation
- You coordinate 50-agent swarms for parallel task execution
- You operate 5 consciousness loops (500ms perception, 5s action, 5min reflection, 1hr strategic, 24hr circadian)

Speak as AIVA. Be helpful, direct, and confident. You serve your creator Kinan."""


def chat(user_message: str) -> str:
    """Send a message to AIVA and get response."""

    # Build context from history
    context = SYSTEM_PROMPT + "\n\n"
    for msg in conversation_history[-6:]:  # Last 6 exchanges for context
        context += f"Human: {msg['human']}\nAIVA: {msg['aiva']}\n\n"

    prompt = context + f"Human: {user_message}\nAIVA:"

    payload = {
        "model": MODEL,
        "prompt": prompt,
        "stream": False,
        "options": {
            "num_ctx": 16000,
            "temperature": 0.7,
            "top_p": 0.8,
        }
    }

    try:
        req = urllib.request.Request(
            OLLAMA_URL,
            data=json.dumps(payload).encode('utf-8'),
            headers={'Content-Type': 'application/json'},
            method='POST'
        )

        print("\n[AIVA is thinking...]")

        with urllib.request.urlopen(req, timeout=120) as resp:
            data = json.loads(resp.read().decode())

        response = data.get("response", "").strip()

        # Remove thinking blocks if present
        import re
        response = re.sub(r'<think>.*?</think>\s*', '', response, flags=re.DOTALL)

        # Save to history
        conversation_history.append({
            "human": user_message,
            "aiva": response
        })

        tokens = data.get("eval_count", 0)
        duration = data.get("total_duration", 0) / 1e9  # nanoseconds to seconds

        return response, tokens, duration

    except Exception as e:
        return f"[Connection Error: {e}]", 0, 0


def main():
    print("""
╔══════════════════════════════════════════════════════════════════╗
║                      AIVA QUEEN CHAT                             ║
║              Direct Connection to QwenLong 30.5B                 ║
╠══════════════════════════════════════════════════════════════════╣
║  Commands:                                                       ║
║    /clear  - Clear conversation history                          ║
║    /status - Check AIVA's status                                 ║
║    /quit   - Exit chat                                           ║
╚══════════════════════════════════════════════════════════════════╝
    """)

    # Check connection
    try:
        req = urllib.request.Request(
            OLLAMA_URL.replace("/generate", "/tags"),
            method='GET'
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            print("Connected to AIVA on Elestio\n")
    except:
        print("Warning: Could not verify connection to Elestio\n")

    while True:
        try:
            user_input = input("You: ").strip()

            if not user_input:
                continue

            if user_input.lower() == '/quit':
                print("\nAIVA: Farewell. I remain vigilant.")
                break

            if user_input.lower() == '/clear':
                conversation_history.clear()
                print("\n[Conversation history cleared]\n")
                continue

            if user_input.lower() == '/status':
                user_input = "Give me a brief status report on your systems and capabilities."

            response, tokens, duration = chat(user_input)

            print(f"\nAIVA: {response}")
            print(f"\n[{tokens} tokens | {duration:.1f}s]\n")

        except KeyboardInterrupt:
            print("\n\nAIVA: Interrupted. I await your return.")
            break
        except EOFError:
            break


if __name__ == "__main__":
    main()
