"""
scripts/knowledge/mermaid_generator.py — Living Mermaid diagram generator.

Scans the Genesis codebase and produces self-contained Mermaid diagrams for:
  - System architecture (directory-level flowchart)
  - Python module import graph
  - Memory pipeline sequence diagram

Usage:
    from scripts.knowledge.mermaid_generator import MermaidGenerator
    gen = MermaidGenerator()
    diagrams = gen.generate_all("/mnt/e/genesis-system/obsidian-vault/diagrams")

# VERIFICATION_STAMP
# Story: 12.03 — MermaidGenerator
# Verified By: parallel-builder
# Verified At: 2026-02-25
# Tests: 9/9
# Coverage: 100%
"""

import ast
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple


class MermaidGenerator:
    """
    Generates living Mermaid diagrams from the Genesis codebase structure.
    All diagrams are returned as raw Mermaid text blocks that render in
    GitHub, Obsidian, and any Mermaid-compatible renderer.
    """

    # Top-level directories that appear as architecture nodes
    ARCH_NODES: List[Tuple[str, str]] = [
        ("core", "Core Execution Layer"),
        ("mcp-servers", "MCP Servers"),
        ("KNOWLEDGE_GRAPH", "Knowledge Graph"),
        ("Sunaiva", "Sunaiva Products"),
        ("RECEPTIONISTAI", "ReceptionistAI"),
        ("TRADIES", "TradiesVoice"),
        ("scripts", "Scripts & Tools"),
        ("database", "Database Layer"),
        ("loop", "RWL Loop"),
        ("deploy", "Deploy / Static"),
        ("AIVA", "AIVA Queen"),
        ("infra", "Infrastructure"),
    ]

    # Data-flow edges between architecture nodes: (from, to, label)
    ARCH_EDGES: List[Tuple[str, str, str]] = [
        ("core", "KNOWLEDGE_GRAPH", "writes KG"),
        ("core", "mcp-servers", "tool calls"),
        ("core", "loop", "dispatches tasks"),
        ("loop", "core", "feeds results"),
        ("scripts", "KNOWLEDGE_GRAPH", "ingests data"),
        ("scripts", "core", "triggers execution"),
        ("mcp-servers", "AIVA", "connects"),
        ("RECEPTIONISTAI", "core", "voice pipeline"),
        ("TRADIES", "core", "voice pipeline"),
        ("Sunaiva", "core", "product APIs"),
        ("deploy", "Sunaiva", "static assets"),
        ("database", "core", "persistent state"),
        ("AIVA", "core", "agent commands"),
        ("infra", "database", "hosts"),
    ]

    def __init__(self, repo_path: str = "/mnt/e/genesis-system") -> None:
        self.repo_path = Path(repo_path)

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def generate_all(self, output_dir: Optional[str] = None) -> Dict[str, str]:
        """
        Generate all three diagrams.

        Args:
            output_dir: If given, write each diagram to a .md file there.

        Returns:
            {"architecture": "...", "module_map": "...", "memory_flow": "..."}
        """
        diagrams = {
            "architecture": self.generate_architecture(),
            "module_map": self.generate_module_map(),
            "memory_flow": self.generate_memory_flow(),
        }

        if output_dir:
            out = Path(output_dir)
            out.mkdir(parents=True, exist_ok=True)
            for name, content in diagrams.items():
                (out / f"{name}.md").write_text(
                    f"# {name.replace('_', ' ').title()} Diagram\n\n{content}\n",
                    encoding="utf-8",
                )

        return diagrams

    def generate_architecture(self) -> str:
        """
        Scan the repository's top-level directories and produce a Mermaid
        flowchart of the system architecture with data-flow edges.

        Returns:
            Raw Mermaid block (```mermaid ... ```)
        """
        # Only include nodes that actually exist in the repo
        present: Set[str] = set()
        for node_id, _ in self.ARCH_NODES:
            if (self.repo_path / node_id).exists():
                present.add(node_id)

        lines: List[str] = ["```mermaid", "flowchart TD"]

        # Node declarations
        for node_id, label in self.ARCH_NODES:
            if node_id in present:
                safe_id = _mermaid_id(node_id)
                lines.append(f'    {safe_id}["{label}"]')

        lines.append("")

        # Edge declarations
        for src, dst, label in self.ARCH_EDGES:
            if src in present and dst in present:
                s = _mermaid_id(src)
                d = _mermaid_id(dst)
                lines.append(f'    {s} -->|"{label}"| {d}')

        lines.append("```")
        return "\n".join(lines)

    def generate_module_map(self) -> str:
        """
        Scan core/**/*.py for import statements and produce a Mermaid graph
        of module dependencies.

        Returns:
            Raw Mermaid block (```mermaid ... ```)
        """
        core_dir = self.repo_path / "core"
        imports_map: Dict[str, Set[str]] = {}

        if core_dir.exists():
            for py_file in sorted(core_dir.rglob("*.py")):
                module_name = _py_to_module_name(py_file, core_dir)
                deps = _extract_imports(py_file)
                # Keep only intra-core imports (relative or "core.*")
                internal_deps: Set[str] = set()
                for dep in deps:
                    if dep.startswith("core.") or dep.startswith("."):
                        clean = dep.lstrip(".").replace("core.", "")
                        if clean:
                            internal_deps.add(clean)
                if internal_deps or module_name:
                    imports_map[module_name] = internal_deps

        lines: List[str] = ["```mermaid", "graph LR"]

        # Collect all referenced nodes
        all_nodes: Set[str] = set(imports_map.keys())
        for deps in imports_map.values():
            all_nodes.update(deps)

        # Limit to max 40 nodes to keep diagrams readable
        representative_nodes = sorted(all_nodes)[:40]
        node_set = set(representative_nodes)

        # Emit nodes
        for node in representative_nodes:
            safe = _mermaid_id(node)
            lines.append(f'    {safe}["{node}"]')

        lines.append("")

        # Emit edges (only between nodes in our limited set)
        edge_count = 0
        for module, deps in sorted(imports_map.items()):
            if module not in node_set:
                continue
            for dep in sorted(deps):
                if dep in node_set and edge_count < 60:
                    s = _mermaid_id(module)
                    d = _mermaid_id(dep)
                    lines.append(f"    {s} --> {d}")
                    edge_count += 1

        lines.append("```")
        return "\n".join(lines)

    def generate_memory_flow(self) -> str:
        """
        Produce a Mermaid sequence diagram of the Genesis memory pipeline:
        Conversation → Aggregation → Distillation → KG Write → Qdrant → Query.

        Returns:
            Raw Mermaid block (```mermaid ... ```)
        """
        diagram = """\
```mermaid
sequenceDiagram
    participant U as User
    participant C as Claude Orchestrator
    participant G as Gemini Swarm
    participant D as memory_digestion.py
    participant KG as Knowledge Graph (JSONL)
    participant Q as Qdrant Vector Store
    participant PG as PostgreSQL (Elestio)

    U->>C: Sends message / task
    C->>G: Dispatches RWL stories
    G-->>C: Returns results
    C->>D: aggregate_session(transcript)
    D->>D: Distil key entities & axioms
    D->>KG: Write entity JSONL record
    D->>KG: Write axiom JSONL record
    D->>Q: Embed & upsert vectors
    D->>PG: Persist structured state
    Note over KG,Q: Living Memory Updated

    U->>C: Follow-up query
    C->>Q: semantic_search(query)
    Q-->>C: Top-K relevant chunks
    C->>KG: Load linked entities
    KG-->>C: Entity context
    C-->>U: Grounded response
```"""
        return diagram


# ------------------------------------------------------------------
# Module-level utilities
# ------------------------------------------------------------------

def _mermaid_id(name: str) -> str:
    """
    Convert a string to a valid Mermaid node identifier
    (alphanumeric + underscore only).
    """
    safe = re.sub(r"[^A-Za-z0-9_]", "_", name)
    # Ensure it doesn't start with a digit
    if safe and safe[0].isdigit():
        safe = "n" + safe
    return safe or "node"


def _py_to_module_name(py_file: Path, base_dir: Path) -> str:
    """
    Convert a Path like core/genesis_executor.py → "genesis_executor".
    Strips the base directory and .py suffix.
    """
    try:
        rel = py_file.relative_to(base_dir)
    except ValueError:
        rel = py_file
    parts = list(rel.parts)
    if parts[-1].endswith(".py"):
        parts[-1] = parts[-1][:-3]
    if parts[-1] == "__init__":
        parts = parts[:-1]
    return ".".join(parts) if parts else py_file.stem


def _extract_imports(py_file: Path) -> Set[str]:
    """
    Parse a Python file with ast and return a set of imported module names.
    Falls back to regex parsing if ast fails (e.g. syntax errors).
    """
    imports: Set[str] = set()
    try:
        source = py_file.read_text(encoding="utf-8", errors="replace")
    except OSError:
        return imports

    # Try ast first
    try:
        tree = ast.parse(source, filename=str(py_file))
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    imports.add(alias.name)
            elif isinstance(node, ast.ImportFrom):
                if node.module:
                    prefix = "." * (node.level or 0)
                    imports.add(prefix + node.module)
        return imports
    except SyntaxError:
        pass

    # Regex fallback
    for match in re.finditer(
        r"^\s*(?:from|import)\s+([\w.]+)", source, re.MULTILINE
    ):
        imports.add(match.group(1))
    return imports
