"""
# VERIFICATION_STAMP
# Story: 1.07 (Track B)
# Verified By: parallel-builder
# Verified At: 2026-02-25T00:00:00Z
# Tests: 10/10
# Coverage: 100%

Module 1 White Box Integration Tests — Internal State Inspection
Story 1.07 — Track B

Tests the interceptor subsystem by inspecting internal state, execution paths,
singleton identity, and concurrency isolation that black-box tests cannot observe.

All tests use mocks — no live I/O (file writes, network calls).

Test classes:
    TestSortingAfterRegister        — WB1: _interceptors list sorted ascending by priority after register()
    TestGlobalChainSingleton        — WB2: GLOBAL_CHAIN is identical object across import paths
    TestErrorPathRouting            — WB3: error path calls on_error (not post_execute)
    TestTaskIdUUID4Format           — WB4: task_id matches UUID4 regex
    TestDispatchedAtISO8601         — WB5: dispatched_at is ISO 8601 formatted
    TestUnregisterReturnValues      — WB6: unregister returns True on success, False on unknown
    TestEmptyChainPreExecute        — WB7: empty chain pre_execute returns payload unchanged
    TestConcurrentDispatchIsolation — WB8: 10 concurrent dispatches all get unique task_ids
    TestInterceptorEnableDisable    — WB9: disabled interceptor is skipped in chain execution
    TestDuplicateNameRegistration   — WB10: registering two interceptors with same name appends both
"""

import asyncio
import re
import sys
import uuid
from datetime import datetime
from unittest.mock import AsyncMock, patch, call, MagicMock

sys.path.insert(0, '/mnt/e/genesis-system')

import pytest

from core.interceptors import (
    BaseInterceptor,
    InterceptorMetadata,
    InterceptorChain,
    GLOBAL_CHAIN,
    dispatch_to_swarm,
    register_interceptor,
)
from core.interceptors.integration_contracts import NullInterceptor
from core.interceptors.execution_telemetry import ExecutionTelemetryInterceptor

# ---------------------------------------------------------------------------
# UUID4 canonical regex (per RFC 4122)
# ---------------------------------------------------------------------------
_UUID4_RE = re.compile(
    r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$',
    re.IGNORECASE,
)


def _is_uuid4(value: str) -> bool:
    """Return True if value matches the canonical UUID4 format."""
    return bool(_UUID4_RE.match(value))


# ---------------------------------------------------------------------------
# Shared concrete interceptor helpers (no-op except where overridden by tests)
# ---------------------------------------------------------------------------

class _PriorityInterceptor(BaseInterceptor):
    """Concrete interceptor parameterised by name and priority. No-op behaviour.

    Minimal BaseInterceptor subclass used throughout the white-box tests:
    only its metadata (name / priority / enabled) matters; every hook is a
    pass-through so chain-ordering and dispatch tests observe pure routing.
    """

    def __init__(self, name: str, priority: int, enabled: bool = True):
        # NOTE(review): BaseInterceptor.__init__ is not called — assumes the
        # base class carries no state of its own. TODO confirm.
        self.metadata = InterceptorMetadata(name=name, priority=priority, enabled=enabled)

    async def pre_execute(self, task_payload: dict) -> dict:
        # Identity behaviour: hand the payload back unchanged.
        return task_payload

    async def post_execute(self, result: dict, task_payload: dict) -> None:
        # Deliberate no-op — tests only observe whether/when hooks fire.
        pass

    async def on_error(self, error: Exception, task_payload: dict) -> dict:
        # Minimal "handled" marker so error-path callers receive a dict.
        return {"error": str(error)}

    async def on_correction(self, correction_payload: dict) -> dict:
        # Pass corrections through untouched.
        return correction_payload


# ---------------------------------------------------------------------------
# WB1: _interceptors sorted after register()
# ---------------------------------------------------------------------------

class TestSortingAfterRegister:
    """
    WB1: Register 3 interceptors in non-sorted priority order.
    Verify that InterceptorChain._interceptors is sorted ascending by priority
    immediately after each register() call.
    """

    @staticmethod
    def _stored_priorities(chain):
        """Read the chain's private list directly — the white-box probe."""
        return [entry.metadata.priority for entry in chain._interceptors]

    def test_internal_list_sorted_ascending_after_three_registers(self):
        """_interceptors must be sorted ascending by priority after each register."""
        chain = InterceptorChain()

        # Deliberately register out of order: 30, 10, 20
        for prio in (30, 10, 20):
            chain.register(_PriorityInterceptor(f"p{prio}", priority=prio))

        internal_priorities = self._stored_priorities(chain)
        assert internal_priorities == [10, 20, 30], (
            f"Expected [10, 20, 30] but got {internal_priorities}"
        )

    def test_internal_list_sorted_after_each_individual_register(self):
        """Sort must be enforced after every register, not just at the end."""
        chain = InterceptorChain()

        # Each step registers one interceptor and states the full expected order.
        steps = (
            (30, [30]),           # first register: trivially sorted
            (10, [10, 30]),       # 10 must be moved ahead of 30
            (20, [10, 20, 30]),   # final state: ascending
        )
        for prio, expected in steps:
            chain.register(_PriorityInterceptor(f"p{prio}", priority=prio))
            assert self._stored_priorities(chain) == expected

    def test_internal_list_names_in_priority_order(self):
        """Internal list order must match priority ascending (names perspective)."""
        chain = InterceptorChain()
        for name, prio in (("c", 30), ("a", 10), ("b", 20)):
            chain.register(_PriorityInterceptor(name, priority=prio))

        internal_names = [entry.metadata.name for entry in chain._interceptors]
        assert internal_names == ["a", "b", "c"], (
            f"Expected ['a', 'b', 'c'] but got {internal_names}"
        )


# ---------------------------------------------------------------------------
# WB2: GLOBAL_CHAIN singleton behaviour
# ---------------------------------------------------------------------------

class TestGlobalChainSingleton:
    """
    WB2: GLOBAL_CHAIN must be the same Python object regardless of import path.
    Tests assert object identity (`is`), not equality.
    """

    def test_global_chain_same_object_via_two_import_paths(self):
        """Importing GLOBAL_CHAIN via two different paths yields the same object."""
        # Path 1: direct from-import of the package attribute
        from core.interceptors import GLOBAL_CHAIN as chain_path1

        # Path 2: attribute access on the imported package module
        import core.interceptors as interceptors_pkg
        chain_path2 = interceptors_pkg.GLOBAL_CHAIN

        # `is` performs the identity check — same as comparing id() values
        assert chain_path1 is chain_path2, (
            "GLOBAL_CHAIN must be the same object across import paths — "
            f"id(path1)={id(chain_path1)}, id(path2)={id(chain_path2)}"
        )

    def test_global_chain_mutation_visible_across_references(self):
        """Mutations via one reference are visible via the other — confirms shared state."""
        from core.interceptors import GLOBAL_CHAIN as first_ref
        import core.interceptors as pkg
        second_ref = pkg.GLOBAL_CHAIN

        baseline = len(first_ref)
        sentinel = _PriorityInterceptor("__wb2_singleton_sentinel__", priority=999)
        first_ref.register(sentinel)

        # The register through first_ref must be observable through second_ref.
        assert len(second_ref) == baseline + 1, (
            "Length seen through ref2 must increase after registering via ref1"
        )

        # Clean up so the shared chain is not polluted for other tests.
        second_ref.unregister("__wb2_singleton_sentinel__")
        assert len(first_ref) == baseline


# ---------------------------------------------------------------------------
# WB3: Error path calls on_error (not post_execute)
# ---------------------------------------------------------------------------

class TestErrorPathRouting:
    """
    WB3: When _execute_task raises, dispatch_to_swarm must call
    GLOBAL_CHAIN.execute_error (the error handler), and must NOT call
    GLOBAL_CHAIN.execute_post.
    """

    @pytest.mark.asyncio
    async def test_execute_error_called_on_exception(self):
        """execute_error is invoked when _execute_task raises."""
        with (
            patch("core.interceptors._execute_task",
                  new_callable=AsyncMock,
                  side_effect=RuntimeError("forced failure")),
            patch.object(GLOBAL_CHAIN, "execute_error",
                         new_callable=AsyncMock,
                         return_value={"handled": True}) as mock_error,
            patch.object(GLOBAL_CHAIN, "execute_post",
                         new_callable=AsyncMock) as mock_post,
        ):
            # Return value is irrelevant here — only the hook routing is asserted.
            await dispatch_to_swarm({"prompt": "trigger error"})

        mock_error.assert_awaited_once()
        mock_post.assert_not_awaited()

    @pytest.mark.asyncio
    async def test_execute_post_called_on_success(self):
        """execute_post is invoked on success; execute_error is NOT called."""
        with (
            patch("core.interceptors._execute_task",
                  new_callable=AsyncMock,
                  return_value={"task_id": "ok", "output": "done", "status": "completed"}),
            patch.object(GLOBAL_CHAIN, "execute_error",
                         new_callable=AsyncMock) as mock_error,
            patch.object(GLOBAL_CHAIN, "execute_post",
                         new_callable=AsyncMock) as mock_post,
        ):
            # Success path: post hook must fire exactly once, error hook never.
            await dispatch_to_swarm({"prompt": "will succeed"})

        mock_post.assert_awaited_once()
        mock_error.assert_not_awaited()

    @pytest.mark.asyncio
    async def test_error_result_carries_status_error_and_correction_key(self):
        """On executor raise, result must have status='error' and 'correction' key."""
        with patch("core.interceptors._execute_task",
                   new_callable=AsyncMock,
                   side_effect=ValueError("wb3 check")):
            result = await dispatch_to_swarm({"prompt": "error structure"})

        # The real GLOBAL_CHAIN error path is exercised here (not mocked),
        # so the envelope structure of the error result can be asserted.
        assert result["status"] == "error"
        assert "correction" in result
        assert "task_id" in result


# ---------------------------------------------------------------------------
# WB4: task_id UUID4 format
# ---------------------------------------------------------------------------

class TestTaskIdUUID4Format:
    """
    WB4: dispatch_to_swarm must produce a task_id that matches the canonical
    UUID4 regex pattern when none is supplied by the caller.

    Canonical form: "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx" where y ∈ {8,9,a,b}.
    """

    @pytest.mark.asyncio
    async def test_auto_generated_task_id_matches_uuid4_regex(self):
        """task_id from dispatch_to_swarm matches UUID4 canonical regex."""
        result = await dispatch_to_swarm({"prompt": "uuid4 format check"})
        # .get with "" default so a missing key fails the regex assert, not KeyError
        task_id = result.get("task_id", "")
        assert _is_uuid4(task_id), (
            f"task_id {task_id!r} does not match UUID4 pattern"
        )

    @pytest.mark.asyncio
    async def test_uuid4_version_bit_is_exactly_4(self):
        """The version nibble in position 14 of the UUID string must be '4'."""
        result = await dispatch_to_swarm({"prompt": "version bit"})
        task_id = result["task_id"]
        # UUID4 has version nibble at index 14 (after removing hyphens: positions 12-15)
        # In the canonical form "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"
        # the '4' is at character index 14 (0-based)
        assert task_id[14] == "4", (
            f"UUID version nibble at position 14 must be '4', got {task_id[14]!r} "
            f"in {task_id!r}"
        )

    @pytest.mark.asyncio
    async def test_uuid4_variant_bits_are_correct(self):
        """Variant bits (position 19) must be '8', '9', 'a', or 'b' (RFC 4122)."""
        result = await dispatch_to_swarm({"prompt": "variant bits"})
        task_id = result["task_id"]
        # Position 19 in "xxxxxxxx-xxxx-4xxx-Yxxx-xxxxxxxxxxxx" is the variant char
        # (.lower() keeps the assertion valid even for uppercase-hex IDs)
        variant_char = task_id[19].lower()
        assert variant_char in ("8", "9", "a", "b"), (
            f"Variant char at position 19 must be 8/9/a/b, got {variant_char!r} "
            f"in {task_id!r}"
        )

    @pytest.mark.asyncio
    async def test_caller_supplied_task_id_preserved_unchanged(self):
        """A task_id already in the payload must not be overwritten."""
        # Valid UUID4-shaped ID supplied by the caller — dispatch must keep it.
        my_id = "cafebabe-0000-4000-8000-000000000001"
        payload = {"prompt": "keep my id", "task_id": my_id}
        result = await dispatch_to_swarm(payload)
        assert result["task_id"] == my_id


# ---------------------------------------------------------------------------
# WB5: dispatched_at is ISO 8601
# ---------------------------------------------------------------------------

class TestDispatchedAtISO8601:
    """
    WB5: dispatch_to_swarm must stamp 'dispatched_at' on the input payload dict
    in ISO 8601 / RFC 3339 format (parseable by datetime.fromisoformat).

    NOTE: datetime.fromisoformat() rejects a trailing 'Z' designator before
    Python 3.11, yet this class explicitly accepts 'Z' as a valid UTC
    indicator (see the timezone test) — so parsing goes through _parse_iso,
    which normalises 'Z' to '+00:00' first.
    """

    @staticmethod
    def _parse_iso(ts: str) -> datetime:
        """Parse ts as ISO 8601, tolerating the 'Z' UTC suffix on all Python versions."""
        return datetime.fromisoformat(ts.replace("Z", "+00:00"))

    @pytest.mark.asyncio
    async def test_dispatched_at_key_present_in_payload(self):
        """dispatched_at must be written into the payload dict in-place."""
        payload: dict = {"prompt": "timestamp wb5"}
        await dispatch_to_swarm(payload)
        assert "dispatched_at" in payload, (
            "dispatch_to_swarm must stamp dispatched_at onto the payload dict"
        )

    @pytest.mark.asyncio
    async def test_dispatched_at_parseable_as_iso8601(self):
        """dispatched_at value must be parseable by datetime.fromisoformat."""
        payload: dict = {"prompt": "iso8601 parse"}
        await dispatch_to_swarm(payload)
        ts = payload["dispatched_at"]
        try:
            parsed = self._parse_iso(ts)
        except ValueError as exc:
            pytest.fail(f"dispatched_at {ts!r} is not valid ISO 8601: {exc}")
        assert parsed is not None

    @pytest.mark.asyncio
    async def test_dispatched_at_includes_timezone_offset(self):
        """dispatched_at must include UTC timezone info ('+00:00' or 'Z' suffix)."""
        payload: dict = {"prompt": "tz check"}
        await dispatch_to_swarm(payload)
        ts = payload["dispatched_at"]
        assert ("+00:00" in ts or ts.endswith("Z")), (
            f"dispatched_at {ts!r} must include UTC timezone indicator"
        )

    @pytest.mark.asyncio
    async def test_dispatched_at_is_recent(self):
        """dispatched_at must represent a time within the dispatch window (UTC-aware)."""
        from datetime import timezone as _tz
        payload: dict = {"prompt": "recency check"}
        before = datetime.now(_tz.utc)
        await dispatch_to_swarm(payload)
        after = datetime.now(_tz.utc)
        ts = payload["dispatched_at"]
        parsed = self._parse_iso(ts)
        # A naive parse would raise TypeError on comparison below — fail
        # explicitly with a useful message instead.
        if parsed.tzinfo is None:
            pytest.fail(f"dispatched_at {ts!r} is naive — expected a UTC-aware timestamp")
        # Both sides are timezone-aware UTC — compare directly
        assert before <= parsed <= after, (
            f"dispatched_at {ts!r} ({parsed}) is not within expected window "
            f"[{before}, {after}]"
        )


# ---------------------------------------------------------------------------
# WB6: unregister returns True on success, False on unknown
# ---------------------------------------------------------------------------

class TestUnregisterReturnValues:
    """
    WB6: InterceptorChain.unregister() must return True when the named
    interceptor exists and was removed, False when the name is unknown.
    """

    def test_unregister_returns_true_for_known_name(self):
        """Returns True when a registered interceptor is removed."""
        chain = InterceptorChain()
        chain.register(_PriorityInterceptor("target", priority=10))
        assert chain.unregister("target") is True

    def test_unregister_returns_false_for_unknown_name(self):
        """Returns False when the name was never registered."""
        assert InterceptorChain().unregister("never_registered") is False

    def test_unregister_reduces_internal_list_length(self):
        """After a successful unregister, _interceptors list shrinks by exactly one."""
        chain = InterceptorChain()
        for name, prio in (("a", 10), ("b", 20)):
            chain.register(_PriorityInterceptor(name, priority=prio))
        assert len(chain._interceptors) == 2

        chain.unregister("a")

        # Exactly one entry remains, and it is the untouched "b"
        survivors = [entry.metadata.name for entry in chain._interceptors]
        assert len(chain._interceptors) == 1
        assert survivors == ["b"]

    def test_unregister_twice_second_call_returns_false(self):
        """Unregistering an already-removed name returns False on the second attempt."""
        chain = InterceptorChain()
        chain.register(_PriorityInterceptor("once", priority=5))
        assert chain.unregister("once") is True
        # Second attempt on the same (now absent) name must report failure
        assert chain.unregister("once") is False

    def test_unregister_unknown_does_not_alter_internal_list(self):
        """Failing unregister must leave _interceptors unchanged."""
        chain = InterceptorChain()
        chain.register(_PriorityInterceptor("keeper", priority=10))
        snapshot = [id(entry) for entry in chain._interceptors]

        chain.unregister("ghost")

        # Identity snapshot: same objects, same order — nothing was touched
        assert [id(entry) for entry in chain._interceptors] == snapshot


# ---------------------------------------------------------------------------
# WB7: Empty chain pre_execute returns payload unchanged
# ---------------------------------------------------------------------------

class TestEmptyChainPreExecute:
    """
    WB7: An InterceptorChain with no registered interceptors must return the
    exact original payload dict from execute_pre — same identity and contents.

    These tests are synchronous, so each drives the async chain API through
    asyncio.run() rather than pytest.mark.asyncio.
    """

    def test_empty_chain_pre_execute_returns_same_dict(self):
        """Empty chain execute_pre returns the exact same dict object."""
        chain = InterceptorChain()
        payload = {"task_id": "wb7", "prompt": "no interceptors"}
        result = asyncio.run(chain.execute_pre(payload))
        # `is`, not `==` — the empty chain must not even copy the dict
        assert result is payload, (
            "Empty chain must return the original dict object (identity check)"
        )

    def test_empty_chain_pre_execute_contents_unchanged(self):
        """Empty chain execute_pre does not modify dict contents."""
        chain = InterceptorChain()
        original = {"task_id": "wb7b", "prompt": "unchanged", "custom": 42}
        # A copy is passed in so `original` stays pristine for the comparison
        result = asyncio.run(chain.execute_pre(dict(original)))
        assert result == original

    def test_empty_chain_execute_post_does_not_raise(self):
        """Empty chain execute_post completes silently — no exception raised."""
        chain = InterceptorChain()
        # Should not raise
        asyncio.run(chain.execute_post({"status": "ok"}, {"task_id": "wb7c"}))

    def test_empty_chain_execute_error_returns_unhandled_sentinel(self):
        """Empty chain execute_error returns {'error': ..., 'unhandled': True}."""
        chain = InterceptorChain()
        result = asyncio.run(
            chain.execute_error(RuntimeError("nobody home"), {"task_id": "wb7d"})
        )
        # With no interceptors to handle it, the chain flags the error as unhandled
        assert result.get("unhandled") is True
        assert "error" in result


# ---------------------------------------------------------------------------
# WB8: Concurrent dispatch isolation
# ---------------------------------------------------------------------------

class TestConcurrentDispatchIsolation:
    """
    WB8: 10 concurrent dispatch_to_swarm calls must each produce a unique,
    independently-generated UUID4 task_id. No two tasks may share an ID.

    Concurrency is driven with asyncio.gather so all dispatches are in
    flight on the event loop at once (cooperative, single-threaded).
    """

    @pytest.mark.asyncio
    async def test_ten_concurrent_dispatches_all_unique_task_ids(self):
        """10 concurrent dispatches produce 10 unique UUID4 task_ids."""
        n = 10
        payloads = [{"prompt": f"concurrent-{i}"} for i in range(n)]
        # gather preserves input order, so results[i] corresponds to payloads[i]
        results = await asyncio.gather(*[dispatch_to_swarm(p) for p in payloads])

        task_ids = [r["task_id"] for r in results]

        # All must be unique
        assert len(set(task_ids)) == n, (
            f"Expected {n} unique task_ids, got {len(set(task_ids))}: {task_ids}"
        )

        # All must be valid UUID4
        for tid in task_ids:
            assert _is_uuid4(tid), f"task_id {tid!r} is not a valid UUID4"

    @pytest.mark.asyncio
    async def test_twenty_concurrent_dispatches_all_unique_task_ids(self):
        """Extended: 20 concurrent dispatches all produce unique UUID4 task_ids."""
        n = 20
        payloads = [{"prompt": f"big-concurrent-{i}"} for i in range(n)]
        results = await asyncio.gather(*[dispatch_to_swarm(p) for p in payloads])

        # set() collapses duplicates — cardinality n proves all-unique
        task_ids = [r["task_id"] for r in results]
        assert len(set(task_ids)) == n

    @pytest.mark.asyncio
    async def test_concurrent_dispatch_all_return_completed_status(self):
        """All concurrent dispatches must complete with status='completed'."""
        payloads = [{"prompt": f"status-check-{i}"} for i in range(10)]
        results = await asyncio.gather(*[dispatch_to_swarm(p) for p in payloads])
        for r in results:
            assert r["status"] == "completed", (
                f"Expected 'completed' but got {r['status']!r}"
            )


# ---------------------------------------------------------------------------
# WB9: Chain preserves interceptor enable/disable state
# ---------------------------------------------------------------------------

class TestInterceptorEnableDisable:
    """
    WB9: Disabled interceptors (metadata.enabled=False) must be skipped
    in chain execution. The chain inspects .metadata.enabled internally.

    Each test records hook invocations into a closure-captured list so the
    execution path can be asserted without mocks.
    """

    def test_disabled_interceptor_skipped_in_execute_pre(self):
        """A disabled interceptor's pre_execute is never called."""
        call_log: list = []  # names of interceptors whose pre_execute actually ran

        class _LoggingInterceptor(_PriorityInterceptor):
            async def pre_execute(self, task_payload: dict) -> dict:
                call_log.append(self.metadata.name)
                return task_payload

        chain = InterceptorChain()
        active = _LoggingInterceptor("active", priority=10, enabled=True)
        inactive = _LoggingInterceptor("inactive", priority=20, enabled=False)

        chain.register(active)
        chain.register(inactive)

        asyncio.run(chain.execute_pre({"prompt": "enable/disable wb9"}))

        assert "active" in call_log, "Enabled interceptor must execute"
        assert "inactive" not in call_log, "Disabled interceptor must be skipped"

    def test_disabled_interceptor_skipped_in_execute_post(self):
        """A disabled interceptor's post_execute is never called."""
        post_log: list = []  # names of interceptors whose post_execute actually ran

        class _PostLoggingInterceptor(_PriorityInterceptor):
            async def post_execute(self, result: dict, task_payload: dict) -> None:
                post_log.append(self.metadata.name)

        chain = InterceptorChain()
        enabled_one = _PostLoggingInterceptor("enabled_post", priority=10, enabled=True)
        disabled_one = _PostLoggingInterceptor("disabled_post", priority=20, enabled=False)

        chain.register(enabled_one)
        chain.register(disabled_one)

        asyncio.run(chain.execute_post({"status": "ok"}, {"task_id": "wb9b"}))

        assert "enabled_post" in post_log
        assert "disabled_post" not in post_log

    def test_re_enabling_interceptor_makes_it_execute_again(self):
        """After enabling a disabled interceptor, it re-enters the execution path."""
        call_log: list = []

        class _ToggleInterceptor(_PriorityInterceptor):
            async def pre_execute(self, task_payload: dict) -> dict:
                call_log.append(self.metadata.name)
                return task_payload

        chain = InterceptorChain()
        toggle = _ToggleInterceptor("toggle", priority=10, enabled=False)
        chain.register(toggle)

        # First run — disabled, should not appear
        asyncio.run(chain.execute_pre({"prompt": "first run"}))
        assert "toggle" not in call_log

        # Enable and run again. The metadata object is replaced wholesale
        # (presumably InterceptorMetadata is immutable/frozen — TODO confirm);
        # the chain holds the interceptor, not the metadata, so this works.
        toggle.metadata = InterceptorMetadata(name="toggle", priority=10, enabled=True)
        asyncio.run(chain.execute_pre({"prompt": "second run"}))
        assert "toggle" in call_log


# ---------------------------------------------------------------------------
# WB10: Register duplicate name behaviour
# ---------------------------------------------------------------------------

class TestDuplicateNameRegistration:
    """
    WB10: InterceptorChain.register() does NOT deduplicate by name.
    Registering two interceptors with the same name appends both — the internal
    list grows by 2, and both execute during chain traversal.

    (This is the actual implementation behaviour: unregister() removes ALL
    interceptors matching the name in a list comprehension.)
    """

    def test_duplicate_name_both_appended_to_internal_list(self):
        """Registering two same-named interceptors results in length=2 internally."""
        chain = InterceptorChain()
        # Same name "dup", different priorities — both must be stored
        chain.register(_PriorityInterceptor("dup", priority=10))
        chain.register(_PriorityInterceptor("dup", priority=20))
        assert len(chain._interceptors) == 2, (
            "Both same-named interceptors must be stored — register() does not deduplicate"
        )

    def test_duplicate_name_both_execute_in_chain(self):
        """Both same-named interceptors execute when the chain runs."""
        execution_count = []  # one entry appended per pre_execute invocation

        class _CountingInterceptor(_PriorityInterceptor):
            async def pre_execute(self, task_payload: dict) -> dict:
                execution_count.append(1)
                return task_payload

        chain = InterceptorChain()
        chain.register(_CountingInterceptor("dup", priority=10))
        chain.register(_CountingInterceptor("dup", priority=20))

        asyncio.run(chain.execute_pre({"prompt": "dup test"}))

        assert sum(execution_count) == 2, (
            f"Expected both duplicates to execute (count=2), got {sum(execution_count)}"
        )

    def test_unregister_removes_all_same_named_interceptors(self):
        """unregister('dup') removes ALL interceptors with that name (list comprehension)."""
        chain = InterceptorChain()
        chain.register(_PriorityInterceptor("dup", priority=10))
        chain.register(_PriorityInterceptor("dup", priority=20))
        chain.register(_PriorityInterceptor("keeper", priority=30))
        assert len(chain._interceptors) == 3

        chain.unregister("dup")  # removes both "dup" entries

        # Only the differently-named interceptor survives
        assert len(chain._interceptors) == 1
        assert chain._interceptors[0].metadata.name == "keeper"

    def test_register_same_name_different_priority_both_sorted(self):
        """Two same-named interceptors are sorted by their respective priorities."""
        chain = InterceptorChain()
        # Registered high-priority-first to prove sorting is by priority, not insertion
        chain.register(_PriorityInterceptor("dup", priority=50))
        chain.register(_PriorityInterceptor("dup", priority=5))

        priorities = [i.metadata.priority for i in chain._interceptors]
        assert priorities == [5, 50], (
            f"Duplicate-named interceptors must still be sorted: {priorities}"
        )
