import pytest
import os
import litellm

# Silence litellm's debug logging so test output stays readable.
litellm.set_verbose = False

# Load environment variables if running locally
# NOTE(review): RUNPOD_API_KEY is only used as a heuristic "are we in CI?"
# probe — it is never consumed by the tests below. If it is unset, fall back
# to a local .env file via python-dotenv (a third-party dependency that must
# be installed for local runs).
if not os.getenv("RUNPOD_API_KEY"): # Example check for environment variable
    from dotenv import load_dotenv
    load_dotenv()

# Define the models to test.
# Each entry pairs a litellm model identifier with the environment variable
# that must hold the provider's API key; tests skip when the key is absent.
models_to_test = [
    {"model_name": "gpt-3.5-turbo", "api_key_env_var": "OPENAI_API_KEY"},
    {"model_name": "claude-instant-1.2", "api_key_env_var": "ANTHROPIC_API_KEY"},
    {"model_name": "cohere/command-nightly", "api_key_env_var": "COHERE_API_KEY"},
    {"model_name": "mistralai/Mistral-7B-Instruct-v0.1", "api_key_env_var": "MISTRAL_API_KEY"},
    # Add more models as needed, with corresponding API key environment variables
]

@pytest.mark.parametrize("model_config", models_to_test)
def test_model_completion(model_config):
    """Smoke-test chat completion for one model from ``models_to_test``.

    Skips when the provider's API key environment variable is unset; fails
    if the API call raises or the response lacks the expected content.
    """
    model_name = model_config["model_name"]
    api_key_env_var = model_config["api_key_env_var"]

    # Check if the API key is available
    api_key = os.getenv(api_key_env_var)
    if not api_key:
        pytest.skip(f"Skipping {model_name} test because {api_key_env_var} is not set.")

    # Keep the try-block limited to the API call: wrapping the asserts too
    # would convert AssertionError into pytest.fail and hide pytest's
    # assertion introspection (diffs) on genuine test failures.
    try:
        response = litellm.completion(
            model=model_name,
            messages=[{"content": "This is a test, respond with the word: 'test'", "role": "user"}],
        )
    except Exception as e:
        pytest.fail(f"Test failed for model {model_name} with error: {e}")

    # Assert that the response is not None
    assert response is not None

    # litellm.completion returns the OpenAI *chat* completion shape:
    # choices[0]["message"]["content"] — the legacy completions "text"
    # field does not exist here and would raise KeyError.
    assert "choices" in response
    assert len(response["choices"]) > 0
    assert "message" in response["choices"][0]
    content = response["choices"][0]["message"]["content"]
    assert content is not None

    # Assert that the response contains the expected text
    assert "test" in content.lower()

    print(f"Successfully tested model: {model_name}")


@pytest.mark.parametrize("model_config", models_to_test)
def test_model_embedding(model_config):
    """Smoke-test the embedding endpoint for one model from ``models_to_test``.

    Skips when litellm lacks an ``embedding`` attribute or the provider's
    API key is not configured; fails if the call raises or the response
    does not contain a list-of-floats embedding.
    """
    model_name = model_config["model_name"]
    api_key_env_var = model_config["api_key_env_var"]

    # Guard clause: older litellm versions may not expose embeddings at all.
    if not hasattr(litellm, 'embedding'):
        pytest.skip(f"Skipping embedding test as litellm version doesn't support embeddings.")

    # Guard clause: no credentials for this provider means nothing to test.
    if not os.getenv(api_key_env_var):
        pytest.skip(f"Skipping {model_name} embedding test because {api_key_env_var} is not set.")

    try:
        result = litellm.embedding(
            model=model_name,
            input=["This is a test sentence."]
        )

        # The call must yield a response carrying at least one data record.
        assert result is not None
        assert "data" in result
        records = result["data"]
        assert len(records) > 0

        first = records[0]
        assert "embedding" in first

        # The embedding itself must be a non-opaque list of float values.
        vector = first["embedding"]
        assert isinstance(vector, list)
        assert all(isinstance(component, float) for component in vector)

        print(f"Successfully tested embedding for model: {model_name}")

    except Exception as e:
        pytest.fail(f"Embedding test failed for model {model_name} with error: {e}")
