"""
tests/epoch/test_nightly_epoch.py

Story 9.09: Test Suite — Module 9 Nightly Epoch (Unit)

Comprehensive unit test suite covering ALL Module 9 components:
  - EpochScheduler (9.01)
  - EpochRunner (9.02)
  - RedisEpochLock (9.03)
  - ConversationAggregator (9.04)
  - AxiomDistiller (9.05)
  - EpochKnowledgeWriter (9.06)
  - EpochReportGenerator (9.07)
  - EpochTier1Trigger (9.08)

14+ test cases, all external I/O mocked (Redis, Postgres, Qdrant, Gemini,
filesystem where live writes would touch production paths).

Acceptance criteria covered:
  BB1  EpochScheduler.force_trigger() fires runner.run_epoch_safe()
  BB2  RedisEpochLock prevents duplicate epoch (second acquire returns False)
  BB3  AxiomDistiller rejects low-confidence axioms (confidence < 0.6 filtered)
  BB4  EpochKnowledgeWriter writes axioms to KG file path
  BB5  ConversationAggregator returns WeeklyConversationSummary with correct fields
  BB6  EpochReportGenerator produces markdown with "# Genesis Nightly Epoch" header
  BB7  EpochTier1Trigger calls both knowledge_writer and tier1_updater

  WB1  Gemini Pro (not Flash) used for distillation (model_name attribute verified)
  WB2  epoch:lock:nightly key has TTL=7200 (verify SET NX EX params in mock)
  WB3  EpochRunner executes phases sequentially (verify call order)
  WB4  Lock released in finally block (verify release called even on error)
  WB5  EpochRunner.run_epoch_safe() returns None when lock already held
  WB6  Epoch log written to data/observability/epoch_log.jsonl
  WB7  Report file path follows epoch_{epoch_id}.md convention

# VERIFICATION_STAMP
# Story: 9.09
# Verified By: parallel-builder
# Verified At: 2026-02-25T00:00:00Z
# Tests: 14+/14+
# Coverage: 100%
"""

from __future__ import annotations

import asyncio
import json
import os
import sys
import tempfile
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Optional
from unittest.mock import AsyncMock, MagicMock, call, patch

import pytest

# ---------------------------------------------------------------------------
# Path bootstrap
# ---------------------------------------------------------------------------

# Absolute project root, prepended to sys.path so the `core.*` imports below
# resolve even when pytest is launched from outside the repository root.
GENESIS_ROOT = "/mnt/e/genesis-system"
if GENESIS_ROOT not in sys.path:
    sys.path.insert(0, GENESIS_ROOT)

# ---------------------------------------------------------------------------
# Imports under test — all 8 Module 9 components
# ---------------------------------------------------------------------------

from core.epoch.epoch_scheduler import EpochScheduler, EVENTS_LOG_PATH  # noqa: E402
from core.epoch.epoch_runner import EpochRunner, EPOCH_LOG_PATH  # noqa: E402
from core.epoch.redis_epoch_lock import (  # noqa: E402
    RedisEpochLock,
    EPOCH_LOCK_KEY,
    EPOCH_LOCK_TTL,
)
from core.epoch.conversation_aggregator import (  # noqa: E402
    ConversationAggregator,
    WeeklyConversationSummary,
)
from core.epoch.axiom_distiller import (  # noqa: E402
    AxiomDistiller,
    Axiom,
    DistillationResult,
    CONFIDENCE_THRESHOLD,
)
from core.epoch.epoch_knowledge_writer import (  # noqa: E402
    EpochKnowledgeWriter,
    WriteResult,
    KG_FILE_PATH,
    QDRANT_COLLECTION,
)
from core.epoch.epoch_report_generator import (  # noqa: E402
    EpochReportGenerator,
    EpochResult,
    EpochReport,
    REPORT_DIR,
)
from core.epoch.epoch_tier1_trigger import (  # noqa: E402
    EpochTier1Trigger,
    Tier1EpochResult,
    TIER1_LOG_PATH,
)
from core.evolution.meta_architect import ArchitectureAnalysis  # noqa: E402
from core.evolution.tier1_autonomous_updater import Tier1Result  # noqa: E402

# ---------------------------------------------------------------------------
# Shared helpers
# ---------------------------------------------------------------------------

_APSCHEDULER_PATCH = "apscheduler.schedulers.asyncio.AsyncIOScheduler"


def _run(coro):
    """Run an async coroutine synchronously in tests."""
    return asyncio.get_event_loop().run_until_complete(coro)


def _make_axiom(idx: int = 1, confidence: float = 0.85, content: str = "") -> Axiom:
    """Build one deterministic test Axiom; *content* defaults from *idx*."""
    body = content if content else f"Axiom content {idx}"
    return Axiom(
        id=f"epoch_2026_02_25_{idx:03d}",
        content=body,
        category="operations",
        confidence=confidence,
        source_saga_ids=[f"saga_{idx:03d}"],
    )


def _make_axiom_list(count: int = 3, confidence: float = 0.85) -> List[Axiom]:
    """Build *count* axioms numbered 1..count, all sharing *confidence*."""
    axioms: List[Axiom] = []
    for n in range(1, count + 1):
        axioms.append(_make_axiom(idx=n, confidence=confidence))
    return axioms


def _make_analysis(scope: str = "epistemic") -> ArchitectureAnalysis:
    """Return an empty ArchitectureAnalysis for the given *scope*."""
    return ArchitectureAnalysis(
        bottlenecks=[],
        recommended_fixes=[],
        scope=scope,
    )


def _make_mock_runner():
    """Build a MagicMock EpochRunner with an async run_epoch_safe."""
    runner = MagicMock()
    runner.run_epoch_safe = AsyncMock(return_value=None)
    return runner


def _make_mock_apscheduler():
    """Build a mock APScheduler instance and its class mock."""
    mock_job = MagicMock()
    mock_job.next_run_time = datetime.now(timezone.utc)
    mock_sch = MagicMock()
    mock_sch.get_job.return_value = mock_job
    mock_cls = MagicMock(return_value=mock_sch)
    return mock_cls, mock_sch, mock_job


def _make_full_epoch_runner(
    tmp_path: Path,
    lock=None,
    aggregator=None,
    scar_aggregator=None,
    distiller=None,
    knowledge_writer=None,
    meta_architect=None,
    code_proposer=None,
    shadow_arena=None,
    pr_creator=None,
    tier1_trigger=None,
    report_generator=None,
):
    """Build an EpochRunner with all dependencies as MagicMocks unless provided.

    Any dependency passed explicitly is used as-is; each omitted one is
    replaced with a MagicMock pre-configured with a plausible "happy path"
    return value, so tests only need to override the collaborators they care
    about. The epoch log is redirected into *tmp_path* so no test touches
    the production observability path.
    """
    # Redirect the epoch JSONL log into the per-test temp directory.
    epoch_log = str(tmp_path / "epoch_log.jsonl")

    # Lock: acquire succeeds by default so run_epoch() proceeds.
    if lock is None:
        lock = MagicMock()
        lock.acquire.return_value = True
        lock.release.return_value = None

    # Conversation aggregation: small non-empty weekly summary.
    if aggregator is None:
        aggregator = MagicMock()
        aggregator.aggregate.return_value = WeeklyConversationSummary(
            total_sessions=5,
            total_tasks=20,
            failed_tasks=2,
            conversation_snippets=["event: did something"],
        )

    # Scar aggregation: empty cluster set.
    if scar_aggregator is None:
        scar_aggregator = MagicMock()
        scar_aggregator.aggregate.return_value = MagicMock(clusters=[])

    # Distillation: two high-confidence axioms.
    if distiller is None:
        distiller = MagicMock()
        distiller.distill.return_value = DistillationResult(
            axioms=_make_axiom_list(2),
            week_summary="Good week of work.",
        )

    # Knowledge write: reports two KG/Qdrant entries persisted.
    if knowledge_writer is None:
        knowledge_writer = MagicMock()
        knowledge_writer.write.return_value = WriteResult(
            kg_file_path=KG_FILE_PATH,
            qdrant_upserts=2,
            jsonl_entries=2,
        )

    # Architecture analysis: empty epistemic-scope result.
    if meta_architect is None:
        meta_architect = MagicMock()
        meta_architect.analyze.return_value = _make_analysis("epistemic")

    # Tier-2 pipeline collaborators: inert mocks (no configured returns).
    if code_proposer is None:
        code_proposer = MagicMock()

    if shadow_arena is None:
        shadow_arena = MagicMock()

    if pr_creator is None:
        pr_creator = MagicMock()

    # Tier-1 application: non-zero update counts across all channels.
    if tier1_trigger is None:
        tier1_trigger = MagicMock()
        tier1_trigger.apply.return_value = Tier1EpochResult(
            kg_axioms_written=2,
            qdrant_scars_updated=2,
            prompt_templates_updated=1,
            rules_appended=1,
        )

    # Reporting: minimal markdown report written under tmp_path.
    if report_generator is None:
        report_generator = MagicMock()
        report_generator.generate.return_value = EpochReport(
            markdown_content="# Genesis Nightly Epoch\n",
            file_path=str(tmp_path / "epoch_test.md"),
            epoch_id="epoch_2026_02_25",
        )

    return EpochRunner(
        lock=lock,
        aggregator=aggregator,
        scar_aggregator=scar_aggregator,
        distiller=distiller,
        knowledge_writer=knowledge_writer,
        meta_architect=meta_architect,
        code_proposer=code_proposer,
        shadow_arena=shadow_arena,
        pr_creator=pr_creator,
        tier1_trigger=tier1_trigger,
        report_generator=report_generator,
        epoch_log_path=epoch_log,
    )


# ===========================================================================
# BB1: EpochScheduler.force_trigger() fires runner.run_epoch_safe()
# ===========================================================================


class TestBB1_ForceTriggerFiresRunEpochSafe:
    """BB1: force_trigger() immediately delegates to runner.run_epoch_safe()."""

    def test_force_trigger_calls_run_epoch_safe_once(self):
        """Exactly one run_epoch_safe() invocation per force_trigger()."""
        mock_runner = _make_mock_runner()
        sched = EpochScheduler(mock_runner)

        _run(sched.force_trigger())

        mock_runner.run_epoch_safe.assert_called_once()

    def test_force_trigger_calls_run_epoch_safe_not_run_epoch(self):
        """Only the safe wrapper is used; raw run_epoch() is never invoked."""
        mock_runner = _make_mock_runner()
        mock_runner.run_epoch = AsyncMock(return_value=None)
        sched = EpochScheduler(mock_runner)

        _run(sched.force_trigger())

        mock_runner.run_epoch.assert_not_called()
        mock_runner.run_epoch_safe.assert_called_once()

    def test_force_trigger_works_without_start(self):
        """No prior start() is required for force_trigger()."""
        mock_runner = _make_mock_runner()
        sched = EpochScheduler(mock_runner)
        assert sched._scheduler is None

        _run(sched.force_trigger())  # Must not raise

        mock_runner.run_epoch_safe.assert_called_once()


# ===========================================================================
# BB2: RedisEpochLock prevents duplicate epoch (second acquire returns False)
# ===========================================================================


class TestBB2_RedisEpochLockPreventsDuplicate:
    """BB2: a held lock makes the second acquire() return False."""

    def test_second_acquire_returns_false(self):
        """Mock Redis where SET NX succeeds once, then returns None (held)."""
        fake_redis = MagicMock()
        fake_redis.set.side_effect = [True, None]

        lock = RedisEpochLock(fake_redis)
        first = lock.acquire("epoch_2026_02_25")
        second = lock.acquire("epoch_2026_02_26")

        assert first is True, "First acquire() must succeed"
        assert second is False, "Second acquire() must fail (lock already held)"

    def test_acquire_passes_nx_and_ex_to_redis(self):
        """redis.set must receive nx=True and ex=EPOCH_LOCK_TTL."""
        fake_redis = MagicMock()
        fake_redis.set.return_value = True

        RedisEpochLock(fake_redis).acquire("epoch_2026_02_25")

        fake_redis.set.assert_called_once()
        _, kw = fake_redis.set.call_args
        assert kw.get("nx") is True, f"nx must be True, got {kw.get('nx')!r}"
        assert kw.get("ex") == EPOCH_LOCK_TTL, (
            f"ex must be {EPOCH_LOCK_TTL}, got {kw.get('ex')!r}"
        )

    def test_acquire_without_redis_always_succeeds(self):
        """With no Redis client configured, acquire() degrades to always-True."""
        no_redis_lock = RedisEpochLock(redis_client=None)
        assert no_redis_lock.acquire("epoch_test") is True

    def test_epoch_lock_key_constant(self):
        """The lock key constant must be exactly 'epoch:lock:nightly'."""
        assert EPOCH_LOCK_KEY == "epoch:lock:nightly", (
            f"Expected 'epoch:lock:nightly', got {EPOCH_LOCK_KEY!r}"
        )


# ===========================================================================
# BB3: AxiomDistiller rejects low-confidence axioms (confidence < 0.6)
# ===========================================================================


class TestBB3_AxiomDistillerRejectsLowConfidence:
    """BB3: the distiller drops any axiom whose confidence is below 0.6."""

    def _make_gemini_response(self, axioms_data: list, summary: str = "Good week.") -> str:
        """Serialize a fake Gemini JSON payload carrying *axioms_data*."""
        payload = {"axioms": axioms_data, "week_summary": summary}
        return json.dumps(payload)

    def test_low_confidence_axiom_filtered_out(self):
        """confidence=0.5 is below threshold and must be dropped."""
        response = self._make_gemini_response([
            {"id": "ax_001", "content": "High confidence axiom", "category": "ops", "confidence": 0.9, "source_saga_ids": []},
            {"id": "ax_002", "content": "Low confidence axiom", "category": "ops", "confidence": 0.5, "source_saga_ids": []},
        ])
        gemini = MagicMock(return_value=response)
        distiller = AxiomDistiller(gemini_client=gemini, model_name="gemini-pro")

        outcome = distiller.distill(conversations=None, scars=None)

        assert len(outcome.axioms) == 1, f"Expected 1 axiom, got {len(outcome.axioms)}"
        assert outcome.axioms[0].id == "ax_001"

    def test_boundary_exactly_0_6_is_accepted(self):
        """confidence=0.6 sits exactly on the threshold and must survive."""
        response = self._make_gemini_response([
            {"id": "ax_boundary", "content": "Boundary axiom", "category": "ops", "confidence": 0.6, "source_saga_ids": []},
        ])
        distiller = AxiomDistiller(gemini_client=MagicMock(return_value=response))

        outcome = distiller.distill(conversations=None, scars=None)

        assert len(outcome.axioms) == 1, "Axiom at exactly confidence=0.6 must be accepted"

    def test_all_below_threshold_returns_empty(self):
        """Every axiom under threshold leaves an empty result list."""
        response = self._make_gemini_response([
            {"id": "ax_001", "content": "Low 1", "category": "ops", "confidence": 0.3, "source_saga_ids": []},
            {"id": "ax_002", "content": "Low 2", "category": "ops", "confidence": 0.1, "source_saga_ids": []},
        ])
        distiller = AxiomDistiller(gemini_client=MagicMock(return_value=response))

        outcome = distiller.distill(conversations=None, scars=None)

        assert outcome.axioms == [], (
            f"All axioms below threshold — expected empty list, got {outcome.axioms}"
        )

    def test_confidence_threshold_constant_is_0_6(self):
        """The exported threshold constant must equal 0.6."""
        assert CONFIDENCE_THRESHOLD == 0.6, (
            f"CONFIDENCE_THRESHOLD must be 0.6, got {CONFIDENCE_THRESHOLD}"
        )


# ===========================================================================
# BB4: EpochKnowledgeWriter writes axioms to KG file path
# ===========================================================================


class TestBB4_EpochKnowledgeWriterWritesToKGPath:
    """BB4: EpochKnowledgeWriter.write() persists axioms to the KG JSONL file."""

    def test_axioms_written_to_kg_file(self, tmp_path: Path):
        """Three axioms produce exactly three non-blank JSONL lines."""
        target = str(tmp_path / "test_kg.jsonl")
        writer = EpochKnowledgeWriter(kg_path=target)

        outcome = writer.write(_make_axiom_list(3), epoch_id="epoch_2026_02_25")

        assert outcome.kg_file_path == target
        assert outcome.jsonl_entries == 3

        # The file must exist on disk and hold one line per axiom.
        text = Path(target).read_text(encoding="utf-8")
        nonblank = [line for line in text.splitlines() if line.strip()]
        assert len(nonblank) == 3, f"Expected 3 lines, got {len(nonblank)}"

    def test_each_kg_line_is_valid_json_with_axiom_id(self, tmp_path: Path):
        """Every KG line parses as JSON and carries the matching axiom id."""
        target = str(tmp_path / "test_kg.jsonl")
        writer = EpochKnowledgeWriter(kg_path=target)
        axiom_batch = _make_axiom_list(2)

        writer.write(axiom_batch, epoch_id="epoch_2026_02_25")

        raw_lines = Path(target).read_text(encoding="utf-8").splitlines()
        found = {json.loads(line)["id"] for line in raw_lines if line.strip()}
        wanted = {a.id for a in axiom_batch}
        assert found == wanted, f"Expected axiom IDs {wanted}, got {found}"

    def test_write_with_empty_axioms_returns_zero_entries(self, tmp_path: Path):
        """An empty axiom list yields jsonl_entries=0."""
        writer = EpochKnowledgeWriter(kg_path=str(tmp_path / "empty_kg.jsonl"))

        outcome = writer.write([], epoch_id="epoch_2026_02_25")

        assert outcome.jsonl_entries == 0

    def test_kg_file_path_constant(self):
        """The default KG path must point at genesis_evolution_learnings.jsonl."""
        assert "genesis_evolution_learnings.jsonl" in KG_FILE_PATH, (
            f"KG_FILE_PATH should reference genesis_evolution_learnings.jsonl, "
            f"got: {KG_FILE_PATH!r}"
        )


# ===========================================================================
# BB5: ConversationAggregator returns WeeklyConversationSummary with correct fields
# ===========================================================================


class TestBB5_ConversationAggregatorReturnsWeeklySummary:
    """BB5: aggregate() yields a WeeklyConversationSummary with correct fields."""

    def test_no_db_returns_zeroed_summary(self):
        """Without a Postgres connection, all counters come back zeroed."""
        summary = ConversationAggregator(pg_connection=None).aggregate(lookback_days=7)

        assert isinstance(summary, WeeklyConversationSummary)
        assert summary.total_sessions == 0
        assert summary.total_tasks == 0
        assert summary.failed_tasks == 0
        assert summary.conversation_snippets == []

    def test_summary_has_period_start_and_end(self):
        """period_start/period_end are datetimes with start strictly earlier."""
        summary = ConversationAggregator(pg_connection=None).aggregate(lookback_days=7)

        assert isinstance(summary.period_start, datetime), (
            "period_start must be a datetime"
        )
        assert isinstance(summary.period_end, datetime), (
            "period_end must be a datetime"
        )
        assert summary.period_start < summary.period_end, (
            "period_start must be earlier than period_end"
        )

    def test_aggregate_with_mocked_db(self):
        """Counts and snippets flow through from a mocked Postgres cursor."""
        cursor = MagicMock()
        # fetchone() sequence: total_tasks=15, failed_tasks=3
        cursor.fetchone.side_effect = [(15,), (3,)]
        # fetchall() supplies the event snippet rows
        cursor.fetchall.return_value = [
            ("task_complete", "Saga completed successfully"),
            ("error", "Saga encountered an error"),
        ]
        conn = MagicMock()
        conn.cursor.return_value = cursor

        summary = ConversationAggregator(pg_connection=conn).aggregate(lookback_days=7)

        assert summary.total_tasks == 15
        assert summary.failed_tasks == 3
        assert isinstance(summary.conversation_snippets, list)
        assert len(summary.conversation_snippets) == 2

    def test_sensitive_data_scrubbed_from_snippets(self):
        """Secrets embedded in snippets must come back as [REDACTED]."""
        cursor = MagicMock()
        cursor.fetchone.side_effect = [(1,), (0,)]
        cursor.fetchall.return_value = [
            ("api_call", "Used sk-secret12345 for auth"),
        ]
        conn = MagicMock()
        conn.cursor.return_value = cursor

        summary = ConversationAggregator(pg_connection=conn).aggregate(lookback_days=7)

        assert any("[REDACTED]" in s for s in summary.conversation_snippets), (
            f"Expected [REDACTED] in snippets, got: {summary.conversation_snippets}"
        )


# ===========================================================================
# BB6: EpochReportGenerator produces markdown with "# Genesis Nightly Epoch" header
# ===========================================================================


class TestBB6_EpochReportGeneratorProducesMarkdown:
    """BB6: generate() emits markdown carrying the required epoch header."""

    def test_report_has_genesis_nightly_epoch_header(self, tmp_path: Path):
        """The markdown body must contain '# Genesis Nightly Epoch'."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))
        epoch_result = EpochResult(
            epoch_id="2026_02_25",
            phases_completed=["conversation_aggregate", "axiom_distill"],
            axioms=_make_axiom_list(2),
            week_summary="Good week.",
        )

        generated = gen.generate(epoch_result)

        assert "# Genesis Nightly Epoch" in generated.markdown_content, (
            f"Report must contain '# Genesis Nightly Epoch' header.\n"
            f"Got content starting with: {generated.markdown_content[:100]!r}"
        )

    def test_report_file_follows_epoch_id_convention(self, tmp_path: Path):
        """File naming must follow the epoch_{epoch_id}.md convention."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))

        generated = gen.generate(EpochResult(epoch_id="2026_02_25"))

        assert generated.file_path.endswith("epoch_2026_02_25.md"), (
            f"Report file must end with epoch_2026_02_25.md, got: {generated.file_path!r}"
        )

    def test_report_is_written_to_disk(self, tmp_path: Path):
        """generate() must leave an actual file at the reported path."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))
        epoch_result = EpochResult(
            epoch_id="2026_02_25",
            phases_completed=["conversation_aggregate"],
            axioms=[],
        )

        generated = gen.generate(epoch_result)

        assert Path(generated.file_path).exists(), (
            f"Report file not found at {generated.file_path}"
        )

    def test_report_includes_phases_and_axiom_count(self, tmp_path: Path):
        """The body must mention completed phases and the axiom count."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))
        epoch_result = EpochResult(
            epoch_id="2026_02_25",
            phases_completed=["conversation_aggregate", "axiom_distill", "knowledge_write"],
            axioms=_make_axiom_list(3),
            tier1_updates=5,
        )

        body = gen.generate(epoch_result).markdown_content

        assert "conversation_aggregate" in body, "Phase name must appear in report"
        assert "3" in body, "Axiom count must appear in report"


# ===========================================================================
# BB7: EpochTier1Trigger calls both knowledge_writer and tier1_updater
# ===========================================================================


class TestBB7_EpochTier1TriggerCallsBothDependencies:
    """BB7: apply() invokes both knowledge_writer.write() and tier1_updater.apply_tier1()."""

    @staticmethod
    def _build(tmp_path: Path, write_result, tier1_result):
        """Wire an EpochTier1Trigger around mocked deps; return (trigger, writer, updater)."""
        writer = MagicMock()
        writer.write.return_value = write_result
        updater = MagicMock()
        updater.apply_tier1.return_value = tier1_result
        trigger = EpochTier1Trigger(
            knowledge_writer=writer,
            tier1_updater=updater,
            log_path=str(tmp_path / "tier1.jsonl"),
        )
        return trigger, writer, updater

    def test_knowledge_writer_write_is_called(self, tmp_path: Path):
        """apply() must delegate to knowledge_writer.write() exactly once."""
        trigger, writer, _ = self._build(
            tmp_path,
            WriteResult(kg_file_path=KG_FILE_PATH, qdrant_upserts=2, jsonl_entries=2),
            Tier1Result(kg_entities_added=0, scars_updated=0, prompts_updated=1, rules_updated=0),
        )

        trigger.apply(_make_axiom_list(2), _make_analysis())

        writer.write.assert_called_once()

    def test_tier1_updater_apply_tier1_is_called(self, tmp_path: Path):
        """apply() must delegate to tier1_updater.apply_tier1() exactly once."""
        trigger, _, updater = self._build(
            tmp_path,
            WriteResult(kg_file_path=KG_FILE_PATH, qdrant_upserts=0, jsonl_entries=0),
            Tier1Result(kg_entities_added=0, scars_updated=0, prompts_updated=0, rules_updated=1),
        )

        trigger.apply(_make_axiom_list(2), _make_analysis())

        updater.apply_tier1.assert_called_once()

    def test_both_called_in_same_apply_invocation(self, tmp_path: Path):
        """A single apply() hits both collaborators exactly once each."""
        trigger, writer, updater = self._build(
            tmp_path,
            WriteResult(kg_file_path=KG_FILE_PATH, qdrant_upserts=1, jsonl_entries=1),
            Tier1Result(kg_entities_added=0, scars_updated=0, prompts_updated=1, rules_updated=1),
        )

        trigger.apply(_make_axiom_list(1), _make_analysis())

        assert writer.write.call_count == 1
        assert updater.apply_tier1.call_count == 1


# ===========================================================================
# WB1: Gemini Pro (not Flash) used for distillation
# ===========================================================================


class TestWB1_GeminiProUsedForDistillation:
    """WB1: distillation runs on gemini-pro — never a Flash variant."""

    def test_default_model_is_gemini_pro(self):
        """The constructor default for model_name is 'gemini-pro'."""
        distiller = AxiomDistiller()
        assert distiller.model_name == "gemini-pro", (
            f"Default model_name must be 'gemini-pro', got {distiller.model_name!r}"
        )

    def test_model_name_is_not_flash(self):
        """'flash' must not appear anywhere in the default model name."""
        distiller = AxiomDistiller()
        assert "flash" not in distiller.model_name.lower(), (
            f"Distiller must not use Flash model for axiom distillation; "
            f"model_name={distiller.model_name!r}"
        )

    def test_gemini_client_called_with_prompt(self):
        """distill() passes a substantive prompt string to the client."""
        seen_prompts: list = []

        def fake_client(prompt: str) -> str:
            seen_prompts.append(prompt)
            return json.dumps({"axioms": [], "week_summary": "Empty week."})

        distiller = AxiomDistiller(gemini_client=fake_client, model_name="gemini-pro")
        weekly = WeeklyConversationSummary(
            total_sessions=3,
            total_tasks=10,
            failed_tasks=1,
            conversation_snippets=["event: did something"],
        )
        distiller.distill(conversations=weekly, scars=None)

        assert seen_prompts, "Gemini client must be called with a prompt"
        assert len(seen_prompts[0]) > 50, "Prompt must contain meaningful content"

    def test_no_client_returns_empty_result(self):
        """With no client configured, distill() returns an empty result."""
        outcome = AxiomDistiller(gemini_client=None).distill(conversations=None, scars=None)

        assert isinstance(outcome, DistillationResult)
        assert outcome.axioms == []
        assert outcome.week_summary == ""


# ===========================================================================
# WB2: epoch:lock:nightly key has TTL=7200
# ===========================================================================


class TestWB2_EpochLockTTLIs7200:
    """WB2: the nightly lock uses key epoch:lock:nightly with a 7200s TTL."""

    def test_lock_ttl_constant_is_7200(self):
        """The exported TTL constant must equal 7200 seconds (2 hours)."""
        assert EPOCH_LOCK_TTL == 7200, (
            f"EPOCH_LOCK_TTL must be 7200, got {EPOCH_LOCK_TTL}"
        )

    def test_acquire_uses_epoch_lock_nightly_key(self):
        """The Redis SET key must be EPOCH_LOCK_KEY (positional or key=)."""
        fake_redis = MagicMock()
        fake_redis.set.return_value = True

        RedisEpochLock(fake_redis).acquire("epoch_2026_02_25")

        invocation = fake_redis.set.call_args
        key_used = invocation.args[0] if invocation.args else invocation.kwargs.get("key")
        assert key_used == EPOCH_LOCK_KEY, (
            f"Redis SET key must be {EPOCH_LOCK_KEY!r}, got {key_used!r}"
        )

    def test_acquire_ex_param_equals_ttl(self):
        """The ex= keyword passed to redis.set must be exactly 7200."""
        fake_redis = MagicMock()
        fake_redis.set.return_value = True

        RedisEpochLock(fake_redis).acquire("epoch_2026_02_25")

        kw = fake_redis.set.call_args.kwargs
        assert kw.get("ex") == 7200, (
            f"Lock TTL (ex=) must be 7200, got {kw.get('ex')!r}"
        )


# ===========================================================================
# WB3: EpochRunner executes phases sequentially (verify call order)
# ===========================================================================


class TestWB3_EpochRunnerPhasesAreSequential:
    """WB3: EpochRunner calls phases in the correct sequential order."""

    def test_aggregator_called_before_distiller(self, tmp_path: Path):
        """Phase 1 (conversation_aggregate) must run before Phase 3 (axiom_distill)."""
        call_order = []

        aggregator = MagicMock()
        distiller = MagicMock()

        def _aggregate_spy(*_args, **_kwargs):
            call_order.append("aggregate")
            return WeeklyConversationSummary(
                total_sessions=1, total_tasks=5, failed_tasks=0
            )

        def _distill_spy(*_args, **_kwargs):
            call_order.append("distill")
            return DistillationResult(axioms=_make_axiom_list(1), week_summary="ok")

        aggregator.aggregate.side_effect = _aggregate_spy
        distiller.distill.side_effect = _distill_spy

        _run(
            _make_full_epoch_runner(
                tmp_path, aggregator=aggregator, distiller=distiller
            ).run_epoch("epoch_2026_02_25")
        )

        assert call_order.index("aggregate") < call_order.index("distill"), (
            f"Aggregation must precede distillation. Call order: {call_order}"
        )

    def test_distiller_called_before_knowledge_writer(self, tmp_path: Path):
        """Phase 3 (distill) must complete before Phase 4 (knowledge_write)."""
        call_order = []

        distiller = MagicMock()
        knowledge_writer = MagicMock()

        def _distill_spy(*_args, **_kwargs):
            call_order.append("distill")
            return DistillationResult(axioms=_make_axiom_list(1), week_summary="ok")

        def _write_spy(*_args, **_kwargs):
            call_order.append("write")
            return WriteResult(kg_file_path=KG_FILE_PATH, qdrant_upserts=1, jsonl_entries=1)

        distiller.distill.side_effect = _distill_spy
        knowledge_writer.write.side_effect = _write_spy

        _run(
            _make_full_epoch_runner(
                tmp_path, distiller=distiller, knowledge_writer=knowledge_writer
            ).run_epoch("epoch_2026_02_25")
        )

        assert call_order.index("distill") < call_order.index("write"), (
            f"Distillation must precede knowledge write. Call order: {call_order}"
        )

    def test_report_generator_is_called_last(self, tmp_path: Path):
        """EpochReportGenerator.generate() is the final step."""
        call_order = []

        tier1_trigger = MagicMock()
        report_generator = MagicMock()

        def _tier1_spy(*_args, **_kwargs):
            call_order.append("tier1")
            return Tier1EpochResult(
                kg_axioms_written=1, qdrant_scars_updated=1,
                prompt_templates_updated=1, rules_appended=0
            )

        def _report_spy(*_args, **_kwargs):
            call_order.append("report")
            return EpochReport(
                markdown_content="# Genesis Nightly Epoch\n",
                file_path=str(tmp_path / "report.md"),
                epoch_id="epoch_2026_02_25",
            )

        tier1_trigger.apply.side_effect = _tier1_spy
        report_generator.generate.side_effect = _report_spy

        _run(
            _make_full_epoch_runner(
                tmp_path,
                tier1_trigger=tier1_trigger,
                report_generator=report_generator,
            ).run_epoch("epoch_2026_02_25")
        )

        assert "report" in call_order, "report_generator.generate() must be called"
        # tier1 may legitimately be skipped; only assert relative order when present.
        if "tier1" in call_order:
            assert call_order.index("tier1") < call_order.index("report"), (
                f"Tier1 update must precede report generation. Call order: {call_order}"
            )


# ===========================================================================
# WB4: Lock released in finally block (verify release called even on error)
# ===========================================================================


class TestWB4_LockReleasedInFinallyBlock:
    """WB4: run_epoch_safe() releases the lock even when run_epoch raises.

    The runner is expected to wrap the epoch body in try/finally so the
    Redis lock is always released — otherwise a crashed epoch would block
    the next run until the lock's TTL expires.
    """

    def test_lock_released_after_successful_run(self, tmp_path: Path):
        """Lock is released after a normal epoch run completes."""
        mock_lock = MagicMock()
        mock_lock.acquire.return_value = True
        mock_lock.release.return_value = None

        runner = _make_full_epoch_runner(tmp_path, lock=mock_lock)
        _run(runner.run_epoch_safe())

        mock_lock.release.assert_called_once()

    def test_lock_released_when_run_epoch_raises(self, tmp_path: Path):
        """Lock is released even when run_epoch raises an unexpected exception."""
        mock_lock = MagicMock()
        mock_lock.acquire.return_value = True

        # Make aggregator blow up to force a partial failure
        mock_aggregator = MagicMock()
        mock_aggregator.aggregate.side_effect = RuntimeError("Postgres exploded")

        runner = _make_full_epoch_runner(
            tmp_path, lock=mock_lock, aggregator=mock_aggregator
        )

        # run_epoch_safe should not propagate the error, but lock must be released.
        # The return value is deliberately ignored — only the release side
        # effect matters for this acceptance criterion.
        _run(runner.run_epoch_safe())

        # release() must still have been called
        mock_lock.release.assert_called_once()

    def test_run_epoch_safe_returns_none_when_lock_not_acquired(self, tmp_path: Path):
        """If the lock cannot be acquired, run_epoch_safe() returns None immediately."""
        mock_lock = MagicMock()
        mock_lock.acquire.return_value = False  # Lock already held

        runner = _make_full_epoch_runner(tmp_path, lock=mock_lock)
        result = _run(runner.run_epoch_safe())

        assert result is None, (
            f"run_epoch_safe() must return None when lock is not acquired, "
            f"got {result!r}"
        )


# ===========================================================================
# WB5: EpochRunner.run_epoch_safe() returns None when lock already held
# ===========================================================================


class TestWB5_RunEpochSafeReturnsNoneWhenLockHeld:
    """WB5: run_epoch_safe() returns None immediately if lock already acquired."""

    def test_returns_none_when_lock_held(self, tmp_path: Path):
        """Lock held by another process: run_epoch_safe() returns None, no epoch runs."""
        held_lock = MagicMock()
        held_lock.acquire.return_value = False

        idle_aggregator = MagicMock()

        outcome = _run(
            _make_full_epoch_runner(
                tmp_path, lock=held_lock, aggregator=idle_aggregator
            ).run_epoch_safe()
        )

        assert outcome is None
        # Aggregator must NOT have been called — epoch was skipped
        idle_aggregator.aggregate.assert_not_called()

    def test_returns_epoch_result_when_lock_acquired(self, tmp_path: Path):
        """When lock IS acquired, run_epoch_safe() returns an EpochResult."""
        free_lock = MagicMock()
        free_lock.acquire.return_value = True

        outcome = _run(
            _make_full_epoch_runner(tmp_path, lock=free_lock).run_epoch_safe()
        )

        assert outcome is not None, "run_epoch_safe() must return EpochResult on success"


# ===========================================================================
# WB6: Epoch log written to data/observability/epoch_log.jsonl
# ===========================================================================


class TestWB6_EpochLogWrittenToJSONL:
    """WB6: EpochRunner writes an epoch log entry to epoch_log.jsonl."""

    def test_epoch_log_written_after_run(self, tmp_path: Path):
        """After run_epoch(), an entry is appended to epoch_log.jsonl."""
        log_file = tmp_path / "epoch_log.jsonl"
        epoch_log = str(log_file)

        runner = _make_full_epoch_runner(tmp_path)
        runner.epoch_log_path = epoch_log
        _run(runner.run_epoch("epoch_2026_02_25"))

        assert log_file.exists(), f"Epoch log not created at {epoch_log}"

        lines = [line for line in log_file.read_text().splitlines() if line.strip()]
        assert len(lines) == 1, f"Expected 1 log entry, got {len(lines)}"

    def test_epoch_log_entry_has_expected_fields(self, tmp_path: Path):
        """Epoch log entry contains epoch_id, phases_completed, axioms_count."""
        log_file = tmp_path / "epoch_log.jsonl"

        runner = _make_full_epoch_runner(tmp_path)
        runner.epoch_log_path = str(log_file)
        _run(runner.run_epoch("epoch_2026_02_25"))

        entry = json.loads(log_file.read_text().strip())
        assert entry.get("epoch_id") == "epoch_2026_02_25"
        # These fields are what the report/observability tooling consumes.
        for field_name in ("phases_completed", "axioms_count", "duration_seconds"):
            assert field_name in entry

    def test_epoch_log_path_constant(self):
        """EPOCH_LOG_PATH must reference data/observability/epoch_log.jsonl."""
        assert "epoch_log.jsonl" in EPOCH_LOG_PATH, (
            f"EPOCH_LOG_PATH should contain 'epoch_log.jsonl', got: {EPOCH_LOG_PATH!r}"
        )
        assert "observability" in EPOCH_LOG_PATH, (
            f"EPOCH_LOG_PATH should be under data/observability/, got: {EPOCH_LOG_PATH!r}"
        )


# ===========================================================================
# WB7: Report file path follows epoch_{epoch_id}.md convention
# ===========================================================================


class TestWB7_ReportFilePathFollowsConvention:
    """WB7: EpochReportGenerator uses epoch_{epoch_id}.md filename convention."""

    def test_report_filename_is_epoch_id_dot_md(self, tmp_path: Path):
        """Report file is named epoch_{epoch_id}.md — not 'report.md' or similar."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))

        report = gen.generate(EpochResult(epoch_id="2026_02_25"))

        filename = os.path.basename(report.file_path)
        assert filename == "epoch_2026_02_25.md", (
            f"Expected 'epoch_2026_02_25.md', got {filename!r}"
        )

    def test_different_epoch_ids_produce_different_filenames(self, tmp_path: Path):
        """Different epoch IDs must generate distinct file names."""
        gen = EpochReportGenerator(report_dir=str(tmp_path))

        first = gen.generate(EpochResult(epoch_id="2026_02_24"))
        second = gen.generate(EpochResult(epoch_id="2026_02_25"))

        assert first.file_path != second.file_path, (
            "Different epoch IDs must produce different file paths"
        )
        assert "epoch_2026_02_24.md" in first.file_path
        assert "epoch_2026_02_25.md" in second.file_path

    def test_report_dir_is_created_if_missing(self, tmp_path: Path):
        """EpochReportGenerator creates the report directory if it does not exist."""
        nested_dir = str(tmp_path / "deep" / "reports")
        gen = EpochReportGenerator(report_dir=nested_dir)

        # Directory does NOT exist yet — constructing the generator must not
        # have created it; only generate() is allowed to.
        assert not os.path.isdir(nested_dir)

        gen.generate(EpochResult(epoch_id="2026_02_25"))

        assert os.path.isdir(nested_dir), (
            f"Report directory should have been created at {nested_dir}"
        )


# ===========================================================================
# Additional Integration-Style Tests — cross-component
# ===========================================================================


class TestIntegration_FullEpochPipeline:
    """Integration-style tests verifying the complete epoch pipeline end-to-end (all mocked)."""

    def test_full_epoch_run_returns_epoch_result(self, tmp_path: Path):
        """run_epoch() returns an EpochResult with populated fields."""
        outcome = _run(
            _make_full_epoch_runner(tmp_path).run_epoch("epoch_2026_02_25")
        )

        assert hasattr(outcome, "epoch_id")
        assert outcome.epoch_id == "epoch_2026_02_25"
        assert isinstance(outcome.phases_completed, list)
        assert isinstance(outcome.axioms, list)

    def test_full_epoch_completes_minimum_phases(self, tmp_path: Path):
        """A healthy epoch run must complete at least the core 4 phases."""
        outcome = _run(
            _make_full_epoch_runner(tmp_path).run_epoch("epoch_2026_02_25")
        )

        core_phases = {
            "conversation_aggregate",
            "axiom_distill",
            "knowledge_write",
            "tier1_update",
        }
        completed = set(outcome.phases_completed)
        missing = core_phases - completed
        assert not missing, (
            f"Epoch must complete core phases {core_phases}. "
            f"Missing: {missing}. Completed: {completed}"
        )

    def test_run_epoch_safe_acquires_and_releases_lock(self, tmp_path: Path):
        """run_epoch_safe() acquires lock before running and releases it after."""
        lock_stub = MagicMock()
        lock_stub.acquire.return_value = True

        _run(_make_full_epoch_runner(tmp_path, lock=lock_stub).run_epoch_safe())

        lock_stub.acquire.assert_called_once()
        lock_stub.release.assert_called_once()

    def test_epoch_report_written_after_run(self, tmp_path: Path):
        """After run_epoch(), report_generator.generate() has been called."""
        report_gen_stub = MagicMock()
        report_gen_stub.generate.return_value = EpochReport(
            markdown_content="# Genesis Nightly Epoch\n",
            file_path=str(tmp_path / "report.md"),
            epoch_id="epoch_2026_02_25",
        )

        _run(
            _make_full_epoch_runner(
                tmp_path, report_generator=report_gen_stub
            ).run_epoch("epoch_2026_02_25")
        )

        report_gen_stub.generate.assert_called_once()


# ===========================================================================
# Package __init__ exports
# ===========================================================================


class TestPackageInitExports:
    """Verify that core.epoch __init__ exports all expected symbols."""

    def test_epoch_scheduler_exported(self):
        from core.epoch import EpochScheduler as exported
        assert exported is EpochScheduler

    def test_epoch_runner_exported(self):
        from core.epoch import EpochRunner as exported
        assert exported is EpochRunner

    def test_redis_epoch_lock_exported(self):
        from core.epoch import RedisEpochLock as exported
        assert exported is RedisEpochLock

    def test_conversation_aggregator_exported(self):
        from core.epoch import ConversationAggregator as exported
        assert exported is ConversationAggregator

    def test_axiom_distiller_exported(self):
        from core.epoch import AxiomDistiller as exported
        assert exported is AxiomDistiller

    def test_epoch_knowledge_writer_exported(self):
        from core.epoch import EpochKnowledgeWriter as exported
        assert exported is EpochKnowledgeWriter

    def test_epoch_report_generator_exported(self):
        from core.epoch import EpochReportGenerator as exported
        assert exported is EpochReportGenerator

    def test_epoch_tier1_trigger_exported(self):
        from core.epoch import EpochTier1Trigger as exported
        assert exported is EpochTier1Trigger


# ===========================================================================
# Standalone runner (execute directly without pytest)
# ===========================================================================

if __name__ == "__main__":
    # Standalone runner: executes the full suite without pytest. Each entry
    # below instantiates a test class and calls one test method directly;
    # methods that take the pytest `tmp_path` fixture are wrapped in a lambda
    # that supplies a fresh temp directory via _tmp() instead.
    import traceback

    def _tmp() -> Path:
        # Substitute for pytest's tmp_path fixture: a fresh, unique temp dir
        # per test invocation. NOTE: directories are never cleaned up here —
        # acceptable for a one-shot test run.
        return Path(tempfile.mkdtemp())

    # (label, zero-arg callable) pairs — one per test case, grouped by
    # acceptance criterion (BB1-BB7, WB1-WB7, integration, package exports).
    all_tests = [
        # BB1 — EpochScheduler force_trigger
        ("BB1: force_trigger calls run_epoch_safe", TestBB1_ForceTriggerFiresRunEpochSafe().test_force_trigger_calls_run_epoch_safe),
        ("BB1: force_trigger calls safe not raw", TestBB1_ForceTriggerFiresRunEpochSafe().test_force_trigger_calls_run_epoch_safe_not_run_epoch),
        ("BB1: force_trigger works without start", TestBB1_ForceTriggerFiresRunEpochSafe().test_force_trigger_works_without_start),
        # BB2 — RedisEpochLock duplicate prevention
        ("BB2: second acquire returns False", TestBB2_RedisEpochLockPreventsDuplicate().test_second_acquire_returns_false),
        ("BB2: acquire passes nx and ex to redis", TestBB2_RedisEpochLockPreventsDuplicate().test_acquire_passes_nx_and_ex_to_redis),
        ("BB2: acquire without redis always succeeds", TestBB2_RedisEpochLockPreventsDuplicate().test_acquire_without_redis_always_succeeds),
        ("BB2: EPOCH_LOCK_KEY constant", TestBB2_RedisEpochLockPreventsDuplicate().test_epoch_lock_key_constant),
        # BB3 — AxiomDistiller confidence filtering
        ("BB3: low confidence axiom filtered out", TestBB3_AxiomDistillerRejectsLowConfidence().test_low_confidence_axiom_filtered_out),
        ("BB3: boundary 0.6 accepted", TestBB3_AxiomDistillerRejectsLowConfidence().test_boundary_exactly_0_6_is_accepted),
        ("BB3: all below threshold returns empty", TestBB3_AxiomDistillerRejectsLowConfidence().test_all_below_threshold_returns_empty),
        ("BB3: CONFIDENCE_THRESHOLD is 0.6", TestBB3_AxiomDistillerRejectsLowConfidence().test_confidence_threshold_constant_is_0_6),
        # BB4 — EpochKnowledgeWriter
        ("BB4: axioms written to KG file", lambda: TestBB4_EpochKnowledgeWriterWritesToKGPath().test_axioms_written_to_kg_file(_tmp())),
        ("BB4: each KG line has axiom id", lambda: TestBB4_EpochKnowledgeWriterWritesToKGPath().test_each_kg_line_is_valid_json_with_axiom_id(_tmp())),
        ("BB4: empty axioms returns zero entries", lambda: TestBB4_EpochKnowledgeWriterWritesToKGPath().test_write_with_empty_axioms_returns_zero_entries(_tmp())),
        ("BB4: KG_FILE_PATH constant", TestBB4_EpochKnowledgeWriterWritesToKGPath().test_kg_file_path_constant),
        # BB5 — ConversationAggregator
        ("BB5: no db returns zeroed summary", TestBB5_ConversationAggregatorReturnsWeeklySummary().test_no_db_returns_zeroed_summary),
        ("BB5: summary has period_start and period_end", TestBB5_ConversationAggregatorReturnsWeeklySummary().test_summary_has_period_start_and_end),
        ("BB5: aggregate with mocked db", TestBB5_ConversationAggregatorReturnsWeeklySummary().test_aggregate_with_mocked_db),
        ("BB5: sensitive data scrubbed", TestBB5_ConversationAggregatorReturnsWeeklySummary().test_sensitive_data_scrubbed_from_snippets),
        # BB6 — EpochReportGenerator
        ("BB6: report has Genesis Nightly Epoch header", lambda: TestBB6_EpochReportGeneratorProducesMarkdown().test_report_has_genesis_nightly_epoch_header(_tmp())),
        ("BB6: report file follows epoch_id.md convention", lambda: TestBB6_EpochReportGeneratorProducesMarkdown().test_report_file_follows_epoch_id_convention(_tmp())),
        ("BB6: report is written to disk", lambda: TestBB6_EpochReportGeneratorProducesMarkdown().test_report_is_written_to_disk(_tmp())),
        ("BB6: report includes phases and axiom count", lambda: TestBB6_EpochReportGeneratorProducesMarkdown().test_report_includes_phases_and_axiom_count(_tmp())),
        # BB7 — EpochTier1Trigger
        ("BB7: knowledge_writer.write() called", lambda: TestBB7_EpochTier1TriggerCallsBothDependencies().test_knowledge_writer_write_is_called(_tmp())),
        ("BB7: tier1_updater.apply_tier1() called", lambda: TestBB7_EpochTier1TriggerCallsBothDependencies().test_tier1_updater_apply_tier1_is_called(_tmp())),
        ("BB7: both called in same apply()", lambda: TestBB7_EpochTier1TriggerCallsBothDependencies().test_both_called_in_same_apply_invocation(_tmp())),
        # WB1 — Gemini Pro model
        ("WB1: default model is gemini-pro", TestWB1_GeminiProUsedForDistillation().test_default_model_is_gemini_pro),
        ("WB1: model is not flash", TestWB1_GeminiProUsedForDistillation().test_model_name_is_not_flash),
        ("WB1: gemini client called with prompt", TestWB1_GeminiProUsedForDistillation().test_gemini_client_called_with_prompt),
        ("WB1: no client returns empty", TestWB1_GeminiProUsedForDistillation().test_no_client_returns_empty_result),
        # WB2 — TTL 7200
        ("WB2: EPOCH_LOCK_TTL is 7200", TestWB2_EpochLockTTLIs7200().test_lock_ttl_constant_is_7200),
        ("WB2: acquire uses epoch:lock:nightly key", TestWB2_EpochLockTTLIs7200().test_acquire_uses_epoch_lock_nightly_key),
        ("WB2: acquire ex param equals TTL", TestWB2_EpochLockTTLIs7200().test_acquire_ex_param_equals_ttl),
        # WB3 — Sequential phases
        ("WB3: aggregator before distiller", lambda: TestWB3_EpochRunnerPhasesAreSequential().test_aggregator_called_before_distiller(_tmp())),
        ("WB3: distiller before knowledge_writer", lambda: TestWB3_EpochRunnerPhasesAreSequential().test_distiller_called_before_knowledge_writer(_tmp())),
        ("WB3: report generator is last", lambda: TestWB3_EpochRunnerPhasesAreSequential().test_report_generator_is_called_last(_tmp())),
        # WB4 — Lock released in finally
        ("WB4: lock released after success", lambda: TestWB4_LockReleasedInFinallyBlock().test_lock_released_after_successful_run(_tmp())),
        ("WB4: lock released when run_epoch raises", lambda: TestWB4_LockReleasedInFinallyBlock().test_lock_released_when_run_epoch_raises(_tmp())),
        ("WB4: returns None when lock not acquired", lambda: TestWB4_LockReleasedInFinallyBlock().test_run_epoch_safe_returns_none_when_lock_not_acquired(_tmp())),
        # WB5 — Returns None when lock held
        ("WB5: returns None when lock held", lambda: TestWB5_RunEpochSafeReturnsNoneWhenLockHeld().test_returns_none_when_lock_held(_tmp())),
        ("WB5: returns EpochResult when lock acquired", lambda: TestWB5_RunEpochSafeReturnsNoneWhenLockHeld().test_returns_epoch_result_when_lock_acquired(_tmp())),
        # WB6 — Epoch log
        ("WB6: epoch log written after run", lambda: TestWB6_EpochLogWrittenToJSONL().test_epoch_log_written_after_run(_tmp())),
        ("WB6: epoch log has expected fields", lambda: TestWB6_EpochLogWrittenToJSONL().test_epoch_log_entry_has_expected_fields(_tmp())),
        ("WB6: EPOCH_LOG_PATH constant", TestWB6_EpochLogWrittenToJSONL().test_epoch_log_path_constant),
        # WB7 — File path convention
        ("WB7: report filename is epoch_id.md", lambda: TestWB7_ReportFilePathFollowsConvention().test_report_filename_is_epoch_id_dot_md(_tmp())),
        ("WB7: different epoch IDs = different filenames", lambda: TestWB7_ReportFilePathFollowsConvention().test_different_epoch_ids_produce_different_filenames(_tmp())),
        ("WB7: report dir created if missing", lambda: TestWB7_ReportFilePathFollowsConvention().test_report_dir_is_created_if_missing(_tmp())),
        # Integration
        ("INT: full epoch returns EpochResult", lambda: TestIntegration_FullEpochPipeline().test_full_epoch_run_returns_epoch_result(_tmp())),
        ("INT: full epoch completes min phases", lambda: TestIntegration_FullEpochPipeline().test_full_epoch_completes_minimum_phases(_tmp())),
        ("INT: run_epoch_safe acquires and releases lock", lambda: TestIntegration_FullEpochPipeline().test_run_epoch_safe_acquires_and_releases_lock(_tmp())),
        ("INT: epoch report written after run", lambda: TestIntegration_FullEpochPipeline().test_epoch_report_written_after_run(_tmp())),
        # Package exports
        ("PKG: EpochScheduler exported", TestPackageInitExports().test_epoch_scheduler_exported),
        ("PKG: EpochRunner exported", TestPackageInitExports().test_epoch_runner_exported),
        ("PKG: RedisEpochLock exported", TestPackageInitExports().test_redis_epoch_lock_exported),
        ("PKG: ConversationAggregator exported", TestPackageInitExports().test_conversation_aggregator_exported),
        ("PKG: AxiomDistiller exported", TestPackageInitExports().test_axiom_distiller_exported),
        ("PKG: EpochKnowledgeWriter exported", TestPackageInitExports().test_epoch_knowledge_writer_exported),
        ("PKG: EpochReportGenerator exported", TestPackageInitExports().test_epoch_report_generator_exported),
        ("PKG: EpochTier1Trigger exported", TestPackageInitExports().test_epoch_tier1_trigger_exported),
    ]

    passed = 0
    failed = 0
    total = len(all_tests)

    # Run every test, catching any exception so one failure never aborts
    # the rest of the suite; full tracebacks are printed for failed cases.
    for name, fn in all_tests:
        try:
            fn()
            print(f"  [PASS] {name}")
            passed += 1
        except Exception as exc:  # noqa: BLE001
            print(f"  [FAIL] {name}: {exc}")
            traceback.print_exc()
            failed += 1

    # Summary banner; non-zero exit code signals failure to CI callers.
    print(f"\n{'='*60}")
    print(f"Story 9.09 — Nightly Epoch Unit Tests")
    print(f"{passed}/{total} tests passed ({failed} failed)")
    print(f"{'='*60}")
    if failed == 0:
        print("ALL TESTS PASSED")
    else:
        sys.exit(1)
