import pytest
import subprocess
import os
import shutil

@pytest.fixture(scope="module")
def project_setup():
    """
    Create a minimal on-disk project layout for the CLI end-to-end tests.

    Builds ``test_project/`` with ``config/`` and ``data/`` subdirectories
    and writes a minimal ``config/default.yaml``.  Yields the project
    directory path; removes the whole tree on teardown.
    """
    project_dir = "test_project"
    config_dir = os.path.join(project_dir, "config")
    data_dir = os.path.join(project_dir, "data")

    # Remove leftovers from a previous failed run so the test starts clean.
    # ignore_errors avoids the exists-then-remove race and tolerates absence.
    shutil.rmtree(project_dir, ignore_errors=True)

    os.makedirs(config_dir, exist_ok=True)
    os.makedirs(data_dir, exist_ok=True)

    # Minimal config the CLI needs; max_steps kept tiny so the test is fast.
    with open(os.path.join(config_dir, "default.yaml"), "w") as f:
        f.write("environment:\n  max_steps: 10\n")  # Minimal config

    yield project_dir

    # Teardown: ignore_errors so a command under test that already removed
    # or relocated the directory doesn't turn a clear test result into a
    # confusing FileNotFoundError raised from fixture teardown.
    shutil.rmtree(project_dir, ignore_errors=True)


def _run_cli(args):
    """Run one ``rlm`` CLI command and return the completed process.

    Replaces ``check=True``: with ``capture_output=True`` a raised
    ``CalledProcessError`` does not show the captured stderr, so failures
    are undiagnosable in CI logs.  Instead, assert on the return code with
    stderr included in the failure message.
    """
    process = subprocess.run(
        ["rlm", *args],
        capture_output=True,
        text=True,
    )
    assert process.returncode == 0, (
        f"`rlm {' '.join(args)}` exited with {process.returncode}:\n"
        f"{process.stderr}"
    )
    return process


def test_learning_pipeline(project_setup):
    """
    E2E test for the learning pipeline.  Executes the `rlm` CLI commands
    to initialize, train, and evaluate a model.
    """
    project_dir = project_setup

    # 1. Initialize the project.
    init_process = _run_cli(["init", project_dir])
    assert "Project initialized successfully" in init_process.stdout

    # 2. Train the model (max_steps overridden to keep the test fast).
    train_process = _run_cli(
        ["train", "--project_dir", project_dir, "--max_steps", "10"]
    )
    assert "Training complete" in train_process.stdout

    # 3. Evaluate the trained model.
    evaluate_process = _run_cli(["evaluate", "--project_dir", project_dir])
    assert "Evaluation complete" in evaluate_process.stdout