#!/usr/bin/env python3
"""
GENESIS CONVERSATION ANALYZER
==============================
Claude conversation history integration with summarization and pattern recognition.

Features:
    - Multi-format conversation import (JSON, JSONL, Markdown, text)
    - Pattern extraction across conversations
    - Topic clustering and trend analysis
    - Automatic summarization with key insights
    - Intent classification and tracking
    - Knowledge graph building from conversations

Conversation Pipeline:
    1. Import and normalize conversation data
    2. Extract messages and metadata
    3. Identify patterns, topics, and intents
    4. Build relationship graph
    5. Generate insights and summaries
"""

import json
import hashlib
import re
import os
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple, Set
from enum import Enum
import logging


# Module-wide logging: configure the root handler once at import time,
# then use a per-module logger for all analyzer messages.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class MessageRole(Enum):
    """Conversation participant roles.

    The parsers in this module only ever assign USER or ASSISTANT;
    SYSTEM, HUMAN, and KINAN are defined but not assigned by any code
    visible in this file.
    """
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    HUMAN = "human"  # some exports label the user turn "human"
    KINAN = "kinan"  # Special role for owner


class IntentCategory(Enum):
    """Classified intent categories.

    Assigned by PatternExtractor.classify_intent using keyword markers;
    OTHER is the fallback when no marker matches.
    """
    QUESTION = "question"
    INSTRUCTION = "instruction"
    CLARIFICATION = "clarification"
    FEEDBACK = "feedback"
    BRAINSTORM = "brainstorm"
    DEBUG = "debug"
    BUILD = "build"
    RESEARCH = "research"
    PLANNING = "planning"
    OTHER = "other"  # default when no marker is found


@dataclass
class Message:
    """A single conversation message."""
    message_id: str        # unique id; parsers fall back to an MD5 digest of content
    conversation_id: str   # id of the owning Conversation
    role: MessageRole
    content: str
    timestamp: Optional[str] = None  # source-provided timestamp string (format not normalized)
    tokens: int = 0                  # token count; not populated by the parsers in this module
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class Conversation:
    """A complete conversation (ordered list of messages plus metadata)."""
    conversation_id: str
    title: Optional[str] = None
    messages: List[Message] = field(default_factory=list)
    created_at: Optional[str] = None  # source-provided timestamp string, if any
    updated_at: Optional[str] = None
    topics: List[str] = field(default_factory=list)  # filled in by ConversationAnalyzer.analyze_conversation
    summary: Optional[str] = None     # not populated by the code in this module
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ConversationPattern:
    """A detected, recurring conversation pattern.

    frequency and last_seen are updated each time the same pattern value
    is recorded again (see ConversationAnalyzer._record_pattern).
    """
    pattern_id: str    # md5 digest of "pattern_type:value"
    pattern_type: str  # topic, phrase, intent, workflow
    value: str
    frequency: int = 1  # number of times the pattern has been recorded
    conversations: List[str] = field(default_factory=list)  # ids of conversations containing it
    examples: List[str] = field(default_factory=list)       # short context snippets (truncated to 200 chars)
    first_seen: str = field(default_factory=lambda: datetime.now().isoformat())
    last_seen: str = field(default_factory=lambda: datetime.now().isoformat())


@dataclass
class TopicCluster:
    """A cluster of topics that co-occur across conversations."""
    cluster_id: str
    name: str              # display name (title-cased lead topic)
    keywords: List[str]    # lead topic plus its strongly co-occurring topics
    conversations: List[str] = field(default_factory=list)  # ids of conversations touching the cluster
    messages: List[str] = field(default_factory=list)       # message ids; not populated by the clustering in this module
    importance: float = 0.0  # fraction of all conversations that touch this cluster


class ConversationParser:
    """Parse conversations from various source formats.

    Each ``parse_*`` staticmethod reads a single file and returns a list
    of Conversation objects. Missing conversation/message ids are
    backfilled with short MD5 digests so every entity is addressable.
    """

    @staticmethod
    def parse_claude_export(file_path: Path) -> List[Conversation]:
        """Parse a Claude conversation export (JSON format).

        Accepts a bare list of conversations, a {"conversations": [...]}
        wrapper, or a single conversation object.
        """
        conversations = []

        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Normalize the three known export shapes to a list of conversations.
        if isinstance(data, list):
            conv_list = data
        elif isinstance(data, dict) and 'conversations' in data:
            conv_list = data['conversations']
        else:
            conv_list = [data]

        for conv_data in conv_list:
            # Prefer the export's own id; otherwise derive a stable digest
            # from the (sorted) JSON payload.
            conv_id = conv_data.get('uuid', conv_data.get('id', hashlib.md5(
                json.dumps(conv_data, sort_keys=True).encode()
            ).hexdigest()[:12]))

            messages = []
            for msg_data in conv_data.get('messages', conv_data.get('chat_messages', [])):
                role_str = msg_data.get('role', msg_data.get('sender', 'unknown'))
                role = MessageRole.USER if role_str in ['user', 'human'] else MessageRole.ASSISTANT

                content = msg_data.get('content', '')
                if isinstance(content, list):
                    # Content-block format: keep dict blocks' text AND plain
                    # string blocks (previously non-dict blocks were dropped).
                    content = ' '.join(
                        block.get('text', str(block)) if isinstance(block, dict) else str(block)
                        for block in content
                    )
                elif not isinstance(content, str):
                    # Null / non-string content must not break .encode() below.
                    content = '' if content is None else str(content)

                messages.append(Message(
                    message_id=msg_data.get('uuid', msg_data.get('id', hashlib.md5(
                        content.encode()
                    ).hexdigest()[:12])),
                    conversation_id=conv_id,
                    role=role,
                    content=content,
                    timestamp=msg_data.get('created_at', msg_data.get('timestamp')),
                    metadata=msg_data.get('metadata', {})
                ))

            conversations.append(Conversation(
                conversation_id=conv_id,
                title=conv_data.get('title', conv_data.get('name')),
                messages=messages,
                created_at=conv_data.get('created_at'),
                updated_at=conv_data.get('updated_at'),
                metadata=conv_data.get('metadata', {})
            ))

        return conversations

    @staticmethod
    def parse_jsonl(file_path: Path) -> List[Conversation]:
        """Parse JSONL format conversations.

        A line with 'conversation_id' but no 'content' starts a new
        conversation; lines with 'content' are appended as messages to
        the current one. Malformed JSON lines are skipped. Message lines
        that appear before any conversation marker are ignored.
        """
        conversations = []
        current_conv = None

        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    data = json.loads(line.strip())

                    # Check if this is a new conversation marker
                    if 'conversation_id' in data and 'content' not in data:
                        if current_conv:
                            conversations.append(current_conv)
                        current_conv = Conversation(
                            conversation_id=data['conversation_id'],
                            title=data.get('title'),
                            created_at=data.get('created_at')
                        )
                    elif current_conv and 'content' in data:
                        role_str = data.get('role', 'user')
                        role = MessageRole.USER if role_str in ['user', 'human'] else MessageRole.ASSISTANT

                        current_conv.messages.append(Message(
                            message_id=data.get('id', hashlib.md5(
                                data['content'].encode()
                            ).hexdigest()[:12]),
                            conversation_id=current_conv.conversation_id,
                            role=role,
                            content=data['content'],
                            timestamp=data.get('timestamp')
                        ))
                except json.JSONDecodeError:
                    continue

        # Flush the trailing conversation (no marker follows the last one).
        if current_conv:
            conversations.append(current_conv)

        return conversations

    @staticmethod
    def parse_markdown(file_path: Path) -> List[Conversation]:
        """Parse Markdown format conversations.

        Sections are split on '# Conversation / Chat / Session' headers;
        messages are recognized by speaker labels such as '**User:**',
        'Human:', or 'Claude:'.
        """
        conversations = []

        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Split by conversation markers
        conv_sections = re.split(r'\n#{1,2}\s+(?:Conversation|Chat|Session)\s*(?::|-)?\s*', content)

        # Speaker labels that map to the assistant side; everything else
        # (User / Human / Kinan) is the user.
        assistant_speakers = {'assistant', 'ai', 'claude'}

        for i, section in enumerate(conv_sections):
            if not section.strip():
                continue

            conv_id = hashlib.md5(section.encode()).hexdigest()[:12]
            messages = []

            # Capture the speaker label so roles follow the transcript
            # instead of blindly alternating, which mislabeled consecutive
            # same-speaker turns and assistant-first conversations.
            msg_pattern = r'(?:\*\*)?(User|Human|Kinan|Assistant|AI|Claude)(?:\*\*)?:\s*(.*?)(?=(?:\*\*)?(?:User|Human|Kinan|Assistant|AI|Claude)(?:\*\*)?:|$)'

            matches = re.findall(msg_pattern, section, re.DOTALL | re.IGNORECASE)

            for speaker, text in matches:
                role = (MessageRole.ASSISTANT
                        if speaker.lower() in assistant_speakers
                        else MessageRole.USER)
                messages.append(Message(
                    message_id=hashlib.md5(text.encode()).hexdigest()[:12],
                    conversation_id=conv_id,
                    role=role,
                    content=text.strip()
                ))

            if messages:
                conversations.append(Conversation(
                    conversation_id=conv_id,
                    title=f"Conversation {i+1}",
                    messages=messages
                ))

        return conversations

    @staticmethod
    def parse_text(file_path: Path) -> List[Conversation]:
        """Parse plain text conversations.

        The file becomes one conversation; chunks separated by '---' rules
        or blank-line runs become messages with roles assigned by simple
        user/assistant alternation (a heuristic — plain text carries no
        speaker labels).
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        conv_id = hashlib.md5(content.encode()).hexdigest()[:12]

        # Split by common delimiters
        messages = []
        parts = re.split(r'\n---+\n|\n\n\n+', content)

        role = MessageRole.USER
        for part in parts:
            if part.strip():
                messages.append(Message(
                    message_id=hashlib.md5(part.encode()).hexdigest()[:12],
                    conversation_id=conv_id,
                    role=role,
                    content=part.strip()
                ))
                role = MessageRole.ASSISTANT if role == MessageRole.USER else MessageRole.USER

        return [Conversation(
            conversation_id=conv_id,
            messages=messages
        )]


class PatternExtractor:
    """Static text-mining helpers: topics, intents, action items, questions."""

    # Technical keywords that indicate important topics
    TECH_KEYWORDS = {
        'api', 'database', 'server', 'client', 'authentication', 'security',
        'memory', 'vector', 'embedding', 'model', 'training', 'inference',
        'pipeline', 'workflow', 'agent', 'orchestration', 'integration',
        'genesis', 'aiva', 'patent', 'validation', 'cryptographic', 'triple-gate',
        'qdrant', 'postgresql', 'redis', 'elestio', 'n8n', 'whisper',
        'claude', 'gemini', 'openai', 'anthropic', 'ghl', 'vapi', 'telnyx',
        'voice', 'transcription', 'browser', 'youtube', 'revenue', 'saas'
    }

    # Intent markers
    INTENT_MARKERS = {
        IntentCategory.QUESTION: ['what', 'why', 'how', 'when', 'where', 'who', 'can you', 'could you', '?'],
        IntentCategory.INSTRUCTION: ['please', 'create', 'build', 'make', 'implement', 'add', 'do', 'run'],
        IntentCategory.CLARIFICATION: ['i mean', 'to clarify', 'let me explain', 'what i meant'],
        IntentCategory.FEEDBACK: ['looks good', 'perfect', 'great', 'nice', 'thanks', 'not quite', 'wrong'],
        IntentCategory.BRAINSTORM: ['what if', 'we could', 'maybe', 'perhaps', 'consider', 'idea'],
        IntentCategory.DEBUG: ['error', 'bug', 'issue', 'problem', 'failed', 'not working', 'fix'],
        IntentCategory.BUILD: ['build', 'create', 'implement', 'develop', 'code', 'write'],
        IntentCategory.RESEARCH: ['research', 'find', 'look up', 'investigate', 'explore', 'learn'],
        IntentCategory.PLANNING: ['plan', 'strategy', 'roadmap', 'next steps', 'priority']
    }

    @classmethod
    def extract_topics(cls, text: str) -> List[str]:
        """Return the distinct topic keywords detected in *text*."""
        lowered = text.lower()

        # Known technical keywords found anywhere in the text.
        found = {kw for kw in cls.TECH_KEYWORDS if kw in lowered}

        # Capitalized phrases are treated as likely proper nouns or concepts.
        for phrase in re.findall(r'\b[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\b', text):
            if len(phrase) > 3:
                found.add(phrase.lower())

        return list(found)

    @classmethod
    def classify_intent(cls, text: str) -> IntentCategory:
        """Return the best-matching intent for *text* (OTHER if nothing hits)."""
        lowered = text.lower()

        best = IntentCategory.OTHER
        best_hits = 0
        for category, markers in cls.INTENT_MARKERS.items():
            hits = sum(1 for marker in markers if marker in lowered)
            # Strictly-greater keeps the earliest category on ties, matching
            # max()'s first-maximum behavior over insertion order.
            if hits > best_hits:
                best, best_hits = category, hits

        return best

    @classmethod
    def extract_action_items(cls, text: str) -> List[str]:
        """Return TODO-style notes followed by imperative sentences."""
        todo_hits = re.findall(r'(?:TODO|FIXME|ACTION|NEXT):\s*(.+?)(?:\n|$)', text, re.IGNORECASE)

        imperative_hits = re.findall(
            r'(?:^|\.\s+)((?:Please\s+)?(?:Create|Build|Make|Implement|Add|Fix|Update|Remove|Test|Check|Review)\s+[^.!?]+[.!])',
            text, re.IGNORECASE | re.MULTILINE
        )

        return todo_hits + imperative_hits

    @classmethod
    def extract_questions(cls, text: str) -> List[str]:
        """Return question sentences longer than 10 characters."""
        return [
            candidate.strip()
            for candidate in re.findall(r'[^.!?]*\?', text)
            if len(candidate.strip()) > 10
        ]


class ConversationAnalyzer:
    """
    Analyze conversations for patterns, topics, and insights.

    Provides comprehensive analysis of Claude conversation history
    with pattern recognition, summarization, and knowledge extraction.

    Detected patterns, topic clusters, and counters are persisted to
    ``storage_dir / "analyzer_state.json"`` so repeated runs accumulate;
    imported conversations themselves are held only in memory.
    """

    def __init__(self, storage_dir: Optional[Path] = None):
        """Initialize the analyzer and load any previously saved state.

        Args:
            storage_dir: Directory for the persisted state file; created
                if missing. Defaults to the Genesis data directory.
        """
        self.storage_dir = storage_dir or Path("/mnt/e/genesis-system/data/conversation_analysis")
        self.storage_dir.mkdir(parents=True, exist_ok=True)

        self._conversations: Dict[str, Conversation] = {}
        self._patterns: Dict[str, ConversationPattern] = {}
        self._topic_clusters: Dict[str, TopicCluster] = {}

        # Running counters. "intents_classified" maps intent value -> count
        # and must remain a defaultdict so unseen intents can be incremented.
        self._stats = {
            "conversations_analyzed": 0,
            "messages_processed": 0,
            "patterns_detected": 0,
            "topics_extracted": 0,
            "intents_classified": defaultdict(int)
        }

        # Load existing data
        self._load_state()

    def _load_state(self):
        """Load persisted patterns, topic clusters, and statistics.

        Corrupt or unreadable state is logged and ignored (fresh start).
        """
        state_file = self.storage_dir / "analyzer_state.json"
        if not state_file.exists():
            return

        try:
            with open(state_file, 'r') as f:
                data = json.load(f)

            self._stats = data.get("stats", self._stats)
            # JSON round-trips the intent counter as a plain dict, which
            # would KeyError on the first new intent; restore the defaultdict.
            self._stats["intents_classified"] = defaultdict(
                int, self._stats.get("intents_classified", {})
            )

            for p_data in data.get("patterns", []):
                pattern = ConversationPattern(**p_data)
                self._patterns[pattern.pattern_id] = pattern

            for tc_data in data.get("topic_clusters", []):
                cluster = TopicCluster(**tc_data)
                self._topic_clusters[cluster.cluster_id] = cluster

        except Exception as e:
            logger.warning(f"Failed to load state: {e}")

    def _save_state(self):
        """Persist patterns, topic clusters, and statistics to disk.

        Conversation/example lists are truncated to bound file growth.
        """
        state_file = self.storage_dir / "analyzer_state.json"
        data = {
            "updated": datetime.now().isoformat(),
            "stats": dict(self._stats),
            "patterns": [
                {
                    "pattern_id": p.pattern_id,
                    "pattern_type": p.pattern_type,
                    "value": p.value,
                    "frequency": p.frequency,
                    "conversations": p.conversations[-20:],  # keep only recent refs
                    "examples": p.examples[-5:],
                    "first_seen": p.first_seen,
                    "last_seen": p.last_seen
                }
                for p in self._patterns.values()
            ],
            "topic_clusters": [
                {
                    "cluster_id": tc.cluster_id,
                    "name": tc.name,
                    "keywords": tc.keywords,
                    "conversations": tc.conversations[-20:],
                    "messages": tc.messages[-10:],
                    "importance": tc.importance
                }
                for tc in self._topic_clusters.values()
            ]
        }

        with open(state_file, 'w') as f:
            json.dump(data, f, indent=2, default=str)

    def import_conversations(self, source: Path) -> int:
        """Import conversations from a file or directory.

        The parser is chosen by file extension (.json / .jsonl / .md /
        anything else as plain text). Failures on individual files are
        logged and skipped.

        Returns:
            Number of conversations imported.
        """
        imported = 0

        if source.is_dir():
            files = list(source.glob("*.json")) + list(source.glob("*.jsonl")) + \
                    list(source.glob("*.md")) + list(source.glob("*.txt"))
        else:
            files = [source]

        for file_path in files:
            try:
                ext = file_path.suffix.lower()

                if ext == ".json":
                    convs = ConversationParser.parse_claude_export(file_path)
                elif ext == ".jsonl":
                    convs = ConversationParser.parse_jsonl(file_path)
                elif ext == ".md":
                    convs = ConversationParser.parse_markdown(file_path)
                else:
                    convs = ConversationParser.parse_text(file_path)

                for conv in convs:
                    self._conversations[conv.conversation_id] = conv
                    imported += 1

                logger.info(f"Imported {len(convs)} conversations from {file_path.name}")

            except Exception as e:
                logger.error(f"Failed to import {file_path}: {e}")

        return imported

    def analyze_conversation(self, conv: Conversation) -> Dict[str, Any]:
        """Analyze a single conversation.

        Classifies user-message intents, extracts per-message action items
        and questions, then topics over the combined text. Updates running
        statistics, the conversation's own ``topics``, and recorded
        patterns as side effects.

        Returns:
            Analysis dict with conversation_id, message_count, topics,
            intents, action_items, questions, and key_phrases.
        """
        analysis = {
            "conversation_id": conv.conversation_id,
            "message_count": len(conv.messages),
            "topics": [],
            "intents": defaultdict(int),
            "action_items": [],
            "questions": [],
            "key_phrases": []
        }

        # Collect message text and join once at the end (string += in a
        # loop is quadratic).
        text_parts = []
        for msg in conv.messages:
            text_parts.append(msg.content)
            self._stats["messages_processed"] += 1

            # Classify intent for user messages
            if msg.role == MessageRole.USER:
                intent = PatternExtractor.classify_intent(msg.content)
                analysis["intents"][intent.value] += 1
                self._stats["intents_classified"][intent.value] += 1

            # Extract action items
            actions = PatternExtractor.extract_action_items(msg.content)
            analysis["action_items"].extend(actions)

            # Extract questions
            questions = PatternExtractor.extract_questions(msg.content)
            analysis["questions"].extend(questions)

        all_text = " ".join(text_parts)

        # Extract topics from combined text
        topics = PatternExtractor.extract_topics(all_text)
        analysis["topics"] = topics
        conv.topics = topics
        self._stats["topics_extracted"] += len(topics)

        # Record each topic as a pattern with a short context example
        for topic in topics:
            self._record_pattern(topic, "topic", conv.conversation_id, all_text[:100])

        self._stats["conversations_analyzed"] += 1
        return analysis

    def analyze_all(self) -> Dict[str, Any]:
        """Analyze all imported conversations and persist updated state.

        Returns:
            Aggregate results: totals, topic/intent frequencies, frequent
            patterns, and generated insight strings.
        """
        results = {
            "total_conversations": len(self._conversations),
            "total_messages": 0,
            "topic_frequency": Counter(),
            "intent_frequency": Counter(),
            "common_patterns": [],
            "insights": []
        }

        for conv_id, conv in self._conversations.items():
            analysis = self.analyze_conversation(conv)
            results["total_messages"] += analysis["message_count"]
            results["topic_frequency"].update(analysis["topics"])
            results["intent_frequency"].update(analysis["intents"])

        # Build topic clusters
        self._build_topic_clusters()

        # Generate insights
        results["insights"] = self._generate_insights()
        results["common_patterns"] = self.get_frequent_patterns(min_frequency=3)

        # Save state
        self._save_state()

        return results

    def _record_pattern(self, value: str, pattern_type: str, conv_id: str, example: str):
        """Record one occurrence of a pattern value.

        Creates the pattern on first sight; otherwise bumps its frequency,
        refreshes last_seen, and appends new conversation/example refs.
        """
        # Stable id derived from type + case-folded value.
        pattern_id = hashlib.md5(f"{pattern_type}:{value.lower()}".encode()).hexdigest()[:12]

        if pattern_id in self._patterns:
            pattern = self._patterns[pattern_id]
            pattern.frequency += 1
            pattern.last_seen = datetime.now().isoformat()
            if conv_id not in pattern.conversations:
                pattern.conversations.append(conv_id)
            if example not in pattern.examples:
                pattern.examples.append(example[:200])
        else:
            self._patterns[pattern_id] = ConversationPattern(
                pattern_id=pattern_id,
                pattern_type=pattern_type,
                value=value,
                frequency=1,
                conversations=[conv_id],
                examples=[example[:200]]
            )
            self._stats["patterns_detected"] += 1

    def _build_topic_clusters(self):
        """Build clusters of related topics via pairwise co-occurrence.

        Two topics co-occur when they appear in the same conversation's
        topic list; a topic with at least one partner seen together twice
        seeds a cluster.
        """
        # Simple co-occurrence counting over each conversation's topics.
        topic_cooccurrence = defaultdict(lambda: defaultdict(int))

        for conv in self._conversations.values():
            for i, topic1 in enumerate(conv.topics):
                for topic2 in conv.topics[i+1:]:
                    topic_cooccurrence[topic1][topic2] += 1
                    topic_cooccurrence[topic2][topic1] += 1

        # Create clusters based on strong co-occurrence; each topic joins
        # at most one cluster.
        processed = set()
        for topic, related in topic_cooccurrence.items():
            if topic in processed:
                continue

            strong_related = [t for t, count in related.items() if count >= 2]
            if strong_related:
                cluster_id = hashlib.md5(topic.encode()).hexdigest()[:12]
                keywords = [topic] + strong_related[:4]

                # Find conversations touching any of the cluster's keywords
                convs = [
                    conv_id for conv_id, conv in self._conversations.items()
                    if any(kw in conv.topics for kw in keywords)
                ]

                self._topic_clusters[cluster_id] = TopicCluster(
                    cluster_id=cluster_id,
                    name=topic.title(),
                    keywords=keywords,
                    conversations=convs,
                    # Importance = share of all conversations in the cluster.
                    importance=len(convs) / max(len(self._conversations), 1)
                )

                processed.add(topic)
                processed.update(strong_related)

    def _generate_insights(self) -> List[str]:
        """Generate human-readable insight strings from current state."""
        insights = []

        # Top topics insight
        topic_counts = Counter()
        for conv in self._conversations.values():
            topic_counts.update(conv.topics)

        top_topics = topic_counts.most_common(5)
        if top_topics:
            insights.append(
                f"Most discussed topics: {', '.join(t[0] for t in top_topics)}"
            )

        # Intent distribution insight
        intent_counts = dict(self._stats["intents_classified"])
        if intent_counts:
            dominant = max(intent_counts, key=intent_counts.get)
            insights.append(f"Dominant conversation intent: {dominant}")

        # Pattern insight: top three by frequency
        if self._patterns:
            top_patterns = sorted(
                self._patterns.values(),
                key=lambda x: x.frequency,
                reverse=True
            )[:3]
            if top_patterns:
                insights.append(
                    f"Recurring patterns: {', '.join(p.value for p in top_patterns)}"
                )

        # Cluster insight: top three by importance
        if self._topic_clusters:
            important_clusters = sorted(
                self._topic_clusters.values(),
                key=lambda x: x.importance,
                reverse=True
            )[:3]
            if important_clusters:
                insights.append(
                    f"Key topic areas: {', '.join(c.name for c in important_clusters)}"
                )

        return insights

    def get_frequent_patterns(self, min_frequency: int = 2, pattern_type: Optional[str] = None) -> List[Dict]:
        """Get frequently occurring patterns, most frequent first.

        Args:
            min_frequency: Minimum occurrence count to include.
            pattern_type: Optional filter on the pattern's type.
        """
        patterns = [
            {
                "pattern_id": p.pattern_id,
                "type": p.pattern_type,
                "value": p.value,
                "frequency": p.frequency,
                "conversations": len(p.conversations)
            }
            for p in self._patterns.values()
            if p.frequency >= min_frequency
            and (pattern_type is None or p.pattern_type == pattern_type)
        ]
        return sorted(patterns, key=lambda x: x["frequency"], reverse=True)

    def summarize_conversation(self, conv_id: str) -> str:
        """Generate a plain-text summary of a specific conversation.

        Returns "Conversation not found" for unknown ids.
        """
        if conv_id not in self._conversations:
            return "Conversation not found"

        conv = self._conversations[conv_id]

        # Extract key elements
        user_messages = [m for m in conv.messages if m.role == MessageRole.USER]
        assistant_messages = [m for m in conv.messages if m.role == MessageRole.ASSISTANT]

        # Fall back to on-the-fly topic extraction if analysis hasn't run.
        topics = conv.topics or PatternExtractor.extract_topics(
            " ".join(m.content for m in conv.messages)
        )

        # Build summary
        summary = f"""Conversation Summary ({conv_id[:8]})
{'=' * 40}
Title: {conv.title or 'Untitled'}
Messages: {len(conv.messages)} ({len(user_messages)} user, {len(assistant_messages)} assistant)
Topics: {', '.join(topics[:5]) if topics else 'None detected'}

Key User Requests:
"""
        for msg in user_messages[:3]:
            intent = PatternExtractor.classify_intent(msg.content)
            summary += f"  - [{intent.value}] {msg.content[:80]}...\n"

        return summary

    def export_knowledge_graph(self, output_path: Path) -> int:
        """Export conversations, patterns, and clusters as JSONL entries.

        Returns:
            Number of entries written.
        """
        exported = 0

        with open(output_path, 'w') as f:
            # Export conversations as entities
            for conv_id, conv in self._conversations.items():
                entry = {
                    "type": "conversation",
                    "id": conv_id,
                    "title": conv.title,
                    "topics": conv.topics,
                    "message_count": len(conv.messages),
                    "created_at": conv.created_at
                }
                f.write(json.dumps(entry) + "\n")
                exported += 1

            # Export patterns as relationships
            for pattern in self._patterns.values():
                entry = {
                    "type": "pattern",
                    "id": pattern.pattern_id,
                    "pattern_type": pattern.pattern_type,
                    "value": pattern.value,
                    "frequency": pattern.frequency,
                    "conversations": pattern.conversations
                }
                f.write(json.dumps(entry) + "\n")
                exported += 1

            # Export topic clusters
            for cluster in self._topic_clusters.values():
                entry = {
                    "type": "topic_cluster",
                    "id": cluster.cluster_id,
                    "name": cluster.name,
                    "keywords": cluster.keywords,
                    "importance": cluster.importance
                }
                f.write(json.dumps(entry) + "\n")
                exported += 1

        return exported

    def get_stats(self) -> Dict[str, Any]:
        """Get analyzer statistics (counters plus in-memory sizes)."""
        return {
            **self._stats,
            "conversations_loaded": len(self._conversations),
            "patterns_stored": len(self._patterns),
            "topic_clusters": len(self._topic_clusters)
        }


def main():
    """Command-line entry point for the Conversation Analyzer."""
    import argparse

    parser = argparse.ArgumentParser(description="Genesis Conversation Analyzer")
    parser.add_argument("command", choices=["import", "analyze", "patterns", "summary", "export", "stats"])
    parser.add_argument("--source", help="Source file or directory")
    parser.add_argument("--conv-id", help="Conversation ID for summary")
    parser.add_argument("--output", help="Output path for export")
    parser.add_argument("-n", type=int, default=10, help="Number of items")
    args = parser.parse_args()

    tool = ConversationAnalyzer()
    command = args.command

    if command == "import":
        if not args.source:
            print("Usage: --source path/to/conversations")
            return
        print(f"Imported {tool.import_conversations(Path(args.source))} conversations")

    elif command == "analyze":
        print("Analyzing all conversations...")
        report = tool.analyze_all()

        print("\nAnalysis Results:")
        print(f"  Conversations: {report['total_conversations']}")
        print(f"  Messages: {report['total_messages']}")
        print("\nTop Topics:")
        for topic, freq in report["topic_frequency"].most_common(10):
            print(f"    {topic}: {freq}")
        print("\nInsights:")
        for line in report["insights"]:
            print(f"  - {line}")

    elif command == "patterns":
        print("Frequent Patterns:")
        print("=" * 40)
        for entry in tool.get_frequent_patterns(min_frequency=2)[:args.n]:
            print(f"  [{entry['type']}] {entry['value']} (x{entry['frequency']} in {entry['conversations']} convs)")

    elif command == "summary":
        if args.conv_id:
            print(tool.summarize_conversation(args.conv_id))
        else:
            # No id supplied: list a handful of known conversation ids.
            for known_id in list(tool._conversations.keys())[:10]:
                print(f"  {known_id}")

    elif command == "export":
        destination = Path(args.output or "conversation_graph.jsonl")
        print(f"Exported {tool.export_knowledge_graph(destination)} entries to {destination}")

    elif command == "stats":
        print("Analyzer Statistics:")
        print("=" * 40)
        print(json.dumps(tool.get_stats(), indent=2, default=str))


# Run the CLI only when executed directly (not on import).
if __name__ == "__main__":
    main()
