import pytest
import time
import requests
import os

# Assuming AIVA's core components are running in Docker containers
# and are accessible via HTTP.

# Base URLs of the services under test; override via environment variables
# when the stack is not running on localhost (e.g. in CI or remote Docker).
AIVA_API_URL = os.getenv("AIVA_API_URL", "http://localhost:8000")  # AIVA core API base URL
MONITORING_API_URL = os.getenv("MONITORING_API_URL", "http://localhost:8080")  # Monitoring/metrics API base URL

# Helper functions

def send_request(endpoint, method='get', data=None, timeout=30):
    """Send an HTTP request to the AIVA API and return the decoded JSON body.

    Args:
        endpoint: Path appended to AIVA_API_URL (e.g. "/tasks").
        method: HTTP method, 'get' or 'post' (case-insensitive).
        data: Optional payload, sent as the JSON body for POST requests.
        timeout: Per-request timeout in seconds. Without it, requests waits
            forever, so one hung service would stall the entire test run.

    Fails the current test (via pytest.fail) on any transport error or on a
    non-2xx response. Raises ValueError for an unsupported HTTP method.
    """
    url = f"{AIVA_API_URL}{endpoint}"
    method = method.lower()  # accept 'GET'/'POST' as well as lowercase
    try:
        if method == 'get':
            response = requests.get(url, timeout=timeout)
        elif method == 'post':
            response = requests.post(url, json=data, timeout=timeout)
        else:
            # Not a RequestException, so this propagates to the caller as-is.
            raise ValueError(f"Unsupported method: {method}")

        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        return response.json()
    except requests.exceptions.RequestException as e:
        pytest.fail(f"Request to {url} failed: {e}")

def get_monitoring_data(endpoint, timeout=30):
    """GET a monitoring endpoint and return its decoded JSON body.

    Args:
        endpoint: Path appended to MONITORING_API_URL (e.g. "/metrics").
        timeout: Per-request timeout in seconds; requests has no default
            timeout, so omitting it could hang the test run indefinitely.

    Fails the current test (via pytest.fail) on any transport error or on a
    non-2xx response.
    """
    url = f"{MONITORING_API_URL}{endpoint}"
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        pytest.fail(f"Request to monitoring endpoint {url} failed: {e}")


@pytest.fixture(scope="module")
def setup_aiva():
    """Module-scoped fixture bracketing the integration tests.

    Currently a placeholder: it only logs setup/teardown and assumes the
    AIVA stack (reachable at AIVA_API_URL / MONITORING_API_URL) is already
    running. Extend it to start/stop Docker containers, seed databases, etc.
    """
    # This fixture should ensure AIVA is running and ready for testing.
    # This might involve starting Docker containers, setting up databases, etc.
    # For now, we'll just assume it's running.
    print("Setting up AIVA for integration tests...")
    yield  # Tests in this module run here; code below is module teardown.
    print("Tearing down AIVA after integration tests...")
    # Add any cleanup code here (e.g., stopping containers).



@pytest.mark.integration
def test_downscaling_with_time_tracking(setup_aiva):
    """Verify AIVA scales instances down after an idle period.

    Flow: submit one task, record the instance count while the system is
    warm, stay idle past the configured downscale window, then assert the
    instance count dropped and the monitoring timer reports 'triggered'.

    The downscale window is read from the DOWNSCALE_DURATION env var
    (seconds, default 60) so the test can follow the deployment's config
    instead of a hard-coded constant.
    """
    # 1. Simulate an initial workload so the scaler has something to scale for.
    print("Simulating initial workload...")
    initial_task_data = {"task_type": "cpu_intensive", "duration": 10}  # Example task
    send_request("/tasks", method="post", data=initial_task_data)  # Assuming a /tasks endpoint
    time.sleep(5)  # Allow time for task processing

    # 2. Capture the resource allocation while the workload is (or was just) active.
    print("Getting initial resource allocation...")
    initial_resources = send_request("/resources")  # Assuming a /resources endpoint
    initial_instance_count = initial_resources.get("instance_count", 0)
    assert initial_instance_count > 0, "Initial instance count should be greater than 0"

    # 3. Stay idle past the downscale window; must match AIVA's configuration.
    print("Waiting for downscale duration...")
    downscale_duration = int(os.getenv("DOWNSCALE_DURATION", "60"))
    time.sleep(downscale_duration + 10)  # extra headroom for the scaler to act

    # 4. The instance count should now be strictly lower than the baseline.
    print("Checking if downscaling occurred...")
    final_resources = send_request("/resources")
    final_instance_count = final_resources.get("instance_count", 0)
    assert final_instance_count < initial_instance_count, "Downscaling should have occurred"

    # 5. Verify monitoring data reflects the downscale timer having fired.
    print("Verifying monitoring data...")
    monitoring_data = get_monitoring_data("/metrics")  # Assuming a /metrics endpoint
    downscale_timer_status = monitoring_data.get("downscale_timer_status", "unknown")
    assert downscale_timer_status == "triggered", "Downscale timer status should be 'triggered'"

    # 6. Clean up once the API supports it.
    print("Cleaning up...")
    # TODO: Implement cleanup, e.g. send_request("/tasks", method="delete")


@pytest.mark.integration
def test_no_downscaling_with_continuous_workload(setup_aiva):
    """Verify AIVA does NOT scale down while workload is continuous.

    Bug fix vs. the previous version: the baseline instance count was read
    AFTER the workload loop had finished, then compared against a second
    read taken immediately afterwards — two back-to-back reads that could
    never detect downscaling. The baseline is now captured while the
    workload starts, before the downscale window elapses.

    The downscale window is read from the DOWNSCALE_DURATION env var
    (seconds, default 60) to match the deployment's configuration.
    """
    print("Simulating continuous workload...")
    downscale_duration = int(os.getenv("DOWNSCALE_DURATION", "60"))
    task_data = {"task_type": "cpu_intensive", "duration": 5}  # invariant: hoisted out of the loop

    # 1. Kick off the workload, then capture the baseline instance count
    #    BEFORE the downscale window has a chance to elapse.
    send_request("/tasks", method="post", data=task_data)
    print("Getting initial resource allocation...")
    initial_resources = send_request("/resources")
    initial_instance_count = initial_resources.get("instance_count", 0)

    # 2. Keep the system busy for longer than the downscale window so the
    #    idle timer should keep resetting and never fire.
    start_time = time.time()
    while time.time() - start_time < downscale_duration + 10:
        send_request("/tasks", method="post", data=task_data)
        time.sleep(1)

    # 3. The instance count must be unchanged from the baseline.
    print("Checking if downscaling did NOT occur...")
    final_resources = send_request("/resources")
    final_instance_count = final_resources.get("instance_count", 0)
    assert final_instance_count == initial_instance_count, "Downscaling should NOT have occurred"

    # 4. Monitoring should show the idle timer was reset by the workload.
    print("Verifying monitoring data...")
    monitoring_data = get_monitoring_data("/metrics")  # Assuming a /metrics endpoint
    downscale_timer_status = monitoring_data.get("downscale_timer_status", "unknown")
    assert downscale_timer_status == "reset", "Downscale timer status should be 'reset' or similar"



