"""
Content Analyzer - Phase 2 Stories 11-22
=========================================
Deep content analysis of Kinan's 615 conversations.

Stories:
- 11: Topic Classification Engine
- 12: Intent Pattern Detector
- 13: Named Entity Extractor
- 14: Question Pattern Analyzer
- 15: Emotional Journey Mapper
- 16: Frustration/Difficulty Detector
- 17: Innovation Moment Detector
- 18: Decision Point Extractor
- 19: Tool/Platform Usage Tracker
- 20: Code Pattern Analyzer
- 21: Strategic Priority Ranker
- 22: Conversation Quality Scorer
"""

import json
import re
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Any, Set, Tuple, Optional
from dataclasses import dataclass, field, asdict
from collections import defaultdict, Counter
import logging

# Module-level logging: root handler at INFO, named child logger for this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("content_analyzer")

# =============================================================================
# PATHS
# =============================================================================

OUTPUT_BASE = Path("/mnt/e/genesis-system/KNOWLEDGE_GRAPH/creator_mind")
ARCHIVE_PATH = OUTPUT_BASE / "conversations_archive.jsonl"  # JSONL: one conversation per line
ANALYSIS_DIR = OUTPUT_BASE / "analysis"
# parents=True so import doesn't fail when OUTPUT_BASE itself is missing:
# a bare mkdir(exist_ok=True) raises FileNotFoundError on a missing parent.
ANALYSIS_DIR.mkdir(parents=True, exist_ok=True)

# =============================================================================
# STORY 11: Topic Classification
# =============================================================================

# Story 11 vocabulary: topic -> lowercase keyword list. Stems such as
# 'automat', 'orchestrat' and 'integrat' are deliberately truncated so one
# entry covers several word forms ('automate', 'automation', ...). Keys are
# matched against lowercased human text by classify_topics(). Keep insertion
# order stable: primary_topic tie-breaking via max() favors earlier topics.
TOPIC_KEYWORDS = {
    'ai_agents': ['agent', 'autonomous', 'agentic', 'multi-agent', 'swarm', 'orchestrat'],
    'voice_ai': ['voice', 'speech', 'tts', 'stt', 'vapi', 'telnyx', 'elevenlabs', 'whisper'],
    'revenue': ['revenue', 'pricing', 'monetize', 'sell', 'client', 'customer', 'payment', 'subscription'],
    'patents': ['patent', 'intellectual property', 'ip', 'invention', 'claims', 'prior art'],
    'memory': ['memory', 'context', 'remember', 'forget', 'persistence', 'semantic', 'episodic'],
    'automation': ['automat', 'workflow', 'n8n', 'zapier', 'trigger', 'webhook'],
    'ghl': ['gohighlevel', 'ghl', 'highlevel', 'snapshot', 'subaccount'],
    'coding': ['code', 'python', 'typescript', 'javascript', 'api', 'function', 'class', 'debug'],
    'infrastructure': ['deploy', 'docker', 'server', 'database', 'postgres', 'redis', 'qdrant'],
    'tradie': ['tradie', 'tradesperson', 'plumber', 'electrician', 'contractor', 'trades'],
    'marketing': ['marketing', 'leads', 'campaign', 'email', 'outreach', 'social media'],
    'strategy': ['strategy', 'strategic', 'vision', 'mission', 'roadmap', 'pivot'],
    'learning': ['learn', 'understand', 'figure out', 'research', 'study', 'explore'],
    'design': ['design', 'ui', 'ux', 'interface', 'layout', 'visual', 'brand'],
    'integration': ['integrat', 'connect', 'bridge', 'sync', 'api', 'mcp'],
    'testing': ['test', 'verify', 'validate', 'check', 'debug', 'error'],
    'security': ['security', 'auth', 'credential', 'token', 'secret', 'permission'],
    'performance': ['performance', 'speed', 'latency', 'optimize', 'fast', 'slow'],
    'cost': ['cost', 'budget', 'expense', 'cheap', 'expensive', 'pricing'],
    'genesis': ['genesis', 'aiva', 'queen', 'system', 'kernel', 'heartbeat'],
    'browser': ['browser', 'playwright', 'puppeteer', 'selenium', 'scrape', 'crawl'],
    'knowledge_graph': ['knowledge graph', 'entity', 'relationship', 'axiom', 'ontology'],
    'llm': ['llm', 'claude', 'gpt', 'gemini', 'model', 'prompt', 'token'],
    'startup': ['startup', 'founder', 'mvp', 'launch', 'scale', 'growth'],
    'personal': ['family', 'health', 'balance', 'rest', 'vacation', 'personal'],
}

# =============================================================================
# STORY 12: Intent Classification
# =============================================================================

# Story 12: intent -> regex fragments, compiled with re.IGNORECASE in
# ContentAnalyzer.__init__ (no re.MULTILINE, so '^' only matches the very
# start of a message and '$' only its end). Categories overlap on purpose:
# e.g. '^can you' appears under both 'question' and 'instruction', so a
# single message can count toward several intents.
INTENT_PATTERNS = {
    'question': [r'\?$', r'^how', r'^what', r'^why', r'^when', r'^where', r'^can you', r'^could you'],
    'instruction': [r'^please', r'^can you', r'^I need you to', r'^do this', r'^make sure', r'^implement'],
    'brainstorm': [r'what if', r'maybe we', r'idea:', r'thinking about', r'consider', r'explore'],
    'debug': [r'error', r'not working', r'broken', r'bug', r'issue', r'problem', r'fix'],
    'planning': [r'plan', r'roadmap', r'next step', r'priority', r'schedule', r'milestone'],
    'feedback': [r'good job', r'well done', r'not quite', r'wrong', r'correct', r'perfect'],
    'research': [r'look into', r'find out', r'research', r'investigate', r'analyze', r'study'],
    'decision': [r'decide', r'choice', r'option', r'go with', r'choose', r'pick'],
    'build': [r'build', r'create', r'implement', r'develop', r'code', r'write'],
    'review': [r'review', r'check', r'look at', r'examine', r'audit', r'assess'],
}

# =============================================================================
# STORY 15: Sentiment/Emotion Patterns
# =============================================================================

# Story 15: emotion -> regex stems, compiled with re.IGNORECASE in
# ContentAnalyzer.__init__. Every match is counted (no per-message cap).
# The sentiment score in analyze_emotions() treats excited/satisfied/
# confident as positive and frustrated/tired as negative; the remaining
# categories are reported but do not move sentiment.
EMOTION_PATTERNS = {
    'excited': [r'exciting', r'amazing', r'incredible', r'awesome', r'fantastic', r'love this', r'brilliant'],
    'frustrated': [r'frustrat', r'annoying', r'stuck', r'ugh', r'damn', r'argh', r'hate'],
    'confident': [r'confident', r'sure', r'certain', r'definitely', r'absolutely', r'no doubt'],
    'uncertain': [r'not sure', r'maybe', r'might', r'possibly', r'unclear', r'confused'],
    'urgent': [r'urgent', r'asap', r'immediately', r'critical', r'now', r'hurry'],
    'satisfied': [r'perfect', r'exactly', r'great', r'thank', r'appreciate', r'happy'],
    'curious': [r'wonder', r'curious', r'interesting', r'intrigued', r'fascinating'],
    'tired': [r'tired', r'exhausted', r'burnt out', r'need rest', r'long day'],
}

# =============================================================================
# STORY 17: Innovation Patterns
# =============================================================================

# Story 17: breakthrough-moment regexes, compiled with re.IGNORECASE in
# ContentAnalyzer.__init__. All groups are non-capturing (?:...), so
# findall() in detect_innovations() returns the whole matched span rather
# than a group tuple.
INNOVATION_PATTERNS = [
    r'(?:aha|eureka|breakthrough)',
    r'(?:just realized|figured out|discovered)',
    r'(?:new idea|novel approach|innovative)',
    r'(?:nobody.+doing|first to|unique)',
    r'(?:game.?changer|paradigm shift)',
    r'(?:what if we.+instead)',
    r'(?:combine|merge|synthesize).+(?:with|and)',
]

# =============================================================================
# STORY 18: Decision Patterns
# =============================================================================

# Story 18: each pattern contains exactly ONE capturing group, so
# findall() in extract_decisions() yields the captured decision text
# itself (bounded to 10-150 / 10-100 chars by the quantifiers). Compiled
# with re.IGNORECASE in ContentAnalyzer.__init__.
DECISION_PATTERNS = [
    r"(?:i've decided|decision:|we'll go with|choosing|picked|selected)\s+(.{10,150})",
    r"(?:let's use|going with|opting for)\s+(.{10,100})",
    r"(?:after.+consideration|weighing options).+(?:we'll|i'll)\s+(.{10,100})",
]

# =============================================================================
# STORY 19: Tool/Platform Tracking
# =============================================================================

# Story 19: known tool/platform names, all lowercase; detection lowercases
# the conversation text before matching. 'make.com' contains a regex
# metacharacter, so any pattern-based matching must re.escape() entries.
# NOTE(review): very short names ('go') are prone to false positives —
# confirm acceptable noise level in the tool_distribution output.
TOOLS_LIST = [
    'claude', 'gpt', 'gemini', 'openai', 'anthropic', 'google',
    'vapi', 'telnyx', 'twilio', 'elevenlabs',
    'gohighlevel', 'ghl', 'highlevel',
    'n8n', 'zapier', 'make.com', 'integromat',
    'supabase', 'firebase', 'postgres', 'mongodb', 'redis', 'qdrant',
    'vercel', 'railway', 'render', 'heroku', 'aws', 'gcp', 'azure',
    'stripe', 'paypal', 'lemonsqueezy',
    'github', 'gitlab', 'bitbucket',
    'slack', 'discord', 'telegram',
    'notion', 'obsidian', 'roam',
    'figma', 'canva',
    'instantly', 'lemlist', 'apollo',
    'playwright', 'puppeteer', 'selenium',
    'docker', 'kubernetes',
    'python', 'typescript', 'javascript', 'rust', 'go',
    'react', 'vue', 'svelte', 'nextjs', 'nuxt',
    'tailwind', 'shadcn',
]

# =============================================================================
# DATA STRUCTURES
# =============================================================================

@dataclass
class ConversationAnalysis:
    """Complete analysis of a single conversation.

    One instance per archive conversation, produced by
    ContentAnalyzer.analyze_conversation() and serialized to JSONL via
    to_dict(). Field groups map to the Phase 2 stories noted below.
    Field order is part of the interface (positional construction,
    asdict() output order) — do not reorder.
    """
    uuid: str        # conversation identifier from the archive
    name: str        # conversation title ('Untitled' when missing)
    created_at: str  # timestamp string as stored in the archive
    updated_at: str

    # Story 11: Topics
    topics: Dict[str, float] = field(default_factory=dict)  # topic -> confidence (0..1)
    primary_topic: str = ""  # highest-confidence topic; "" when no topic matched

    # Story 12: Intents
    intents: Dict[str, int] = field(default_factory=dict)  # intent -> count
    primary_intent: str = ""  # most frequent intent; "" when none detected

    # Story 13: Entities
    entities: Dict[str, List[str]] = field(default_factory=dict)  # type -> list

    # Story 14: Questions
    question_count: int = 0
    key_questions: List[str] = field(default_factory=list)  # first few extracted questions

    # Story 15: Emotions
    emotions: Dict[str, int] = field(default_factory=dict)  # emotion -> match count
    sentiment_score: float = 0.0  # -1 (negative) to 1 (positive)

    # Story 16: Frustration
    frustration_moments: int = 0  # count of frustration markers in human text
    frustration_topics: List[str] = field(default_factory=list)  # topics active while frustrated

    # Story 17: Innovation
    innovation_moments: int = 0
    innovations: List[str] = field(default_factory=list)  # matched innovation snippets

    # Story 18: Decisions
    decisions: List[str] = field(default_factory=list)  # extracted decision texts (<=150 chars)

    # Story 19: Tools
    tools_mentioned: List[str] = field(default_factory=list)

    # Story 20: Code
    code_snippets: int = 0  # number of fenced ``` code blocks
    languages_used: List[str] = field(default_factory=list)

    # Story 22: Quality
    quality_score: float = 0.0   # 0-10, see ContentAnalyzer.calculate_quality_score()
    insight_density: float = 0.0  # quality_score divided by message count

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable dict of all fields (dataclasses.asdict)."""
        return asdict(self)

@dataclass
class TopicEvolution:
    """Track topic mentions over time.

    NOTE(review): declared but never populated in this module
    (ContentAnalyzer.topic_evolution stays empty) — presumably reserved
    for a later phase or another module; confirm before removing.
    """
    topic: str
    first_seen: str  # timestamp of earliest mention
    last_seen: str   # timestamp of latest mention
    total_mentions: int
    # Presumably keyed "YYYY-MM" -> count — TODO confirm against the producer.
    monthly_counts: Dict[str, int] = field(default_factory=dict)

@dataclass
class ToolUsage:
    """Track tool usage patterns.

    NOTE(review): declared but never populated in this module
    (ContentAnalyzer.tool_usage stays empty) — presumably reserved for a
    later phase or another module; confirm before removing.
    """
    tool: str
    first_mentioned: str  # timestamp of earliest mention
    last_mentioned: str   # timestamp of latest mention
    mention_count: int
    associated_topics: List[str] = field(default_factory=list)  # topics co-occurring with this tool

# =============================================================================
# MAIN ANALYZER
# =============================================================================

class ContentAnalyzer:
    """
    Phase 2: Deep content analysis of all conversations.

    Streams the JSONL archive at ARCHIVE_PATH (one conversation object per
    line), runs the Story 11-22 analyses on each conversation, accumulates
    global statistics, then writes result files into ANALYSIS_DIR and logs
    a summary report.
    """

    def __init__(self):
        # Per-conversation results, in archive order.
        self.analyses: List[ConversationAnalysis] = []
        # NOTE(review): these two maps are initialized but never populated
        # here — presumably reserved for a later phase; confirm.
        self.topic_evolution: Dict[str, TopicEvolution] = {}
        self.tool_usage: Dict[str, ToolUsage] = {}
        self.global_stats = {
            'total_conversations': 0,
            'total_human_messages': 0,
            'total_questions': 0,
            'total_decisions': 0,
            'total_innovations': 0,
            'total_frustrations': 0,
            'topic_distribution': Counter(),
            'intent_distribution': Counter(),
            'emotion_distribution': Counter(),
            'tool_distribution': Counter(),
        }

        # Compile every regex once up front; the per-conversation methods
        # only execute pre-compiled patterns.
        self.intent_compiled = {
            intent: [re.compile(p, re.IGNORECASE) for p in patterns]
            for intent, patterns in INTENT_PATTERNS.items()
        }
        self.emotion_compiled = {
            emotion: [re.compile(p, re.IGNORECASE) for p in patterns]
            for emotion, patterns in EMOTION_PATTERNS.items()
        }
        self.innovation_compiled = [re.compile(p, re.IGNORECASE) for p in INNOVATION_PATTERNS]
        self.decision_compiled = [re.compile(p, re.IGNORECASE) for p in DECISION_PATTERNS]

        # Topic keywords are literal stems: escape them and anchor at a word
        # start, so the 'patents' stem 'ip' no longer matches inside
        # 'description' while truncated stems like 'automat' still match
        # 'automation'. (The previous bare substring search produced such
        # mid-word false positives.)
        self.topic_compiled = {
            topic: [re.compile(r'\b' + re.escape(kw)) for kw in keywords]
            for topic, keywords in TOPIC_KEYWORDS.items()
        }

        # Frustration stems (Story 16); previously rebuilt on every call.
        self.frustration_compiled = [
            re.compile(p, re.IGNORECASE) for p in (
                r'frustrat', r'stuck', r'not working', r'broken', r'ugh', r'damn',
                r'annoying', r'waste of time', r'giving up', r'impossible',
            )
        ]

        # Word-boundary patterns for tool names, shared by extract_entities()
        # and track_tools() so both report the same tool set. re.escape
        # handles the dot in 'make.com'.
        self.tool_compiled = {
            tool: re.compile(r'\b' + re.escape(tool) + r'\b')
            for tool in TOOLS_LIST
        }

    # =========================================================================
    # STORY 11: Topic Classification
    # =========================================================================

    def classify_topics(self, text: str) -> Dict[str, float]:
        """Score text against every topic, returning topic -> confidence.

        Confidence is the keyword match count normalized by text length and
        capped at 1.0; topics with zero matches are omitted entirely.
        """
        text_lower = text.lower()
        # +1 keeps the denominator sane for very short texts.
        denom = len(text.split()) / 100 + 1
        topic_scores: Dict[str, float] = {}

        for topic, patterns in self.topic_compiled.items():
            hits = sum(len(p.findall(text_lower)) for p in patterns)
            if hits > 0:
                topic_scores[topic] = min(1.0, hits / denom)

        return topic_scores

    # =========================================================================
    # STORY 12: Intent Detection
    # =========================================================================

    def detect_intents(self, messages: List[Dict]) -> Dict[str, int]:
        """Count intents across human messages.

        Each human message contributes at most 1 per intent category, so
        the counts are per-message, not per-match.
        """
        intent_counts: Counter = Counter()

        for msg in messages:
            if msg.get('sender') != 'human':
                continue

            text = msg.get('text', '')
            for intent, patterns in self.intent_compiled.items():
                if any(p.search(text) for p in patterns):
                    intent_counts[intent] += 1

        return dict(intent_counts)

    # =========================================================================
    # STORY 13: Entity Extraction
    # =========================================================================

    def extract_entities(self, text: str) -> Dict[str, List[str]]:
        """Extract lightweight named entities (people, tools, URLs, files).

        Heuristic, regex-based extraction — not a real NER model. List
        sizes are capped to keep per-conversation records small.
        """
        entities: Dict[str, List[str]] = {
            'people': [],
            'tools': [],
            'urls': [],
            'files': [],
        }

        # People: two consecutive capitalized words (e.g. "Jane Smith").
        names = re.findall(r'\b([A-Z][a-z]+\s+[A-Z][a-z]+)\b', text)
        entities['people'] = list(set(names))[:10]

        # Tools: word-boundary match, consistent with track_tools(). The
        # previous substring test reported e.g. 'go' inside 'good'.
        text_lower = text.lower()
        entities['tools'] = [
            tool for tool, pattern in self.tool_compiled.items()
            if pattern.search(text_lower)
        ]

        # URLs (first 5, in order of appearance).
        urls = re.findall(r'https?://[^\s<>"{}|\\^`\[\]]+', text)
        entities['urls'] = urls[:5]

        # File-ish tokens: path characters followed by a 2-4 char extension.
        files = re.findall(r'[\w/\\]+\.\w{2,4}\b', text)
        entities['files'] = list(set(files))[:10]

        return entities

    # =========================================================================
    # STORY 14: Question Analysis
    # =========================================================================

    def extract_questions(self, text: str) -> List[str]:
        """Return up to 10 question sentences (stripped, > 10 chars each)."""
        # A question is any run of non-terminator chars ending in '?'.
        questions = re.findall(r'[^.!?]*\?', text)
        questions = [q.strip() for q in questions if len(q.strip()) > 10]
        return questions[:10]

    # =========================================================================
    # STORY 15: Emotion/Sentiment
    # =========================================================================

    def analyze_emotions(self, text: str) -> Tuple[Dict[str, int], float]:
        """Count emotion markers and derive a crude sentiment score.

        Returns (emotion -> match count, sentiment in [-1, 1]), where
        sentiment = (positive - negative) / (positive + negative + 1).
        excited/satisfied/confident count as positive; frustrated/tired as
        negative; other categories do not move sentiment.
        """
        emotion_counts: Counter = Counter()

        for emotion, patterns in self.emotion_compiled.items():
            for pattern in patterns:
                hits = len(pattern.findall(text))
                if hits:
                    emotion_counts[emotion] += hits

        positive = (emotion_counts.get('excited', 0)
                    + emotion_counts.get('satisfied', 0)
                    + emotion_counts.get('confident', 0))
        negative = emotion_counts.get('frustrated', 0) + emotion_counts.get('tired', 0)
        sentiment = (positive - negative) / (positive + negative + 1)

        return dict(emotion_counts), sentiment

    # =========================================================================
    # STORY 16: Frustration Detection
    # =========================================================================

    def detect_frustration(self, text: str, topics: Dict[str, float]) -> Tuple[int, List[str]]:
        """Count frustration markers and list the topics active at the time.

        ``topics`` is the classify_topics() output for the same text; any
        topic scoring above 0.3 is reported as a frustration topic when at
        least one marker fired.
        """
        count = sum(len(p.findall(text)) for p in self.frustration_compiled)
        frustration_topics = [t for t, s in topics.items() if s > 0.3] if count > 0 else []
        return count, frustration_topics

    # =========================================================================
    # STORY 17: Innovation Detection
    # =========================================================================

    def detect_innovations(self, text: str) -> List[str]:
        """Return up to 5 innovation/breakthrough text snippets."""
        innovations: List[str] = []

        for pattern in self.innovation_compiled:
            innovations.extend(pattern.findall(text))

        # Also capture up to 150 chars of context around realization verbs.
        innovations.extend(
            re.findall(r'.{0,50}(?:realized|figured out|idea).{0,100}', text, re.IGNORECASE)
        )

        return innovations[:5]

    # =========================================================================
    # STORY 18: Decision Extraction
    # =========================================================================

    def extract_decisions(self, text: str) -> List[str]:
        """Return up to 5 decision snippets, each trimmed to 150 chars."""
        decisions: List[str] = []

        for pattern in self.decision_compiled:
            # Each DECISION_PATTERNS regex has one capture group, so
            # findall() yields the decision text directly.
            decisions.extend(pattern.findall(text))

        return [d.strip()[:150] for d in decisions][:5]

    # =========================================================================
    # STORY 19: Tool Tracking
    # =========================================================================

    def track_tools(self, text: str) -> List[str]:
        """Return tools/platforms mentioned (word-boundary, case-insensitive)."""
        text_lower = text.lower()
        return [
            tool for tool, pattern in self.tool_compiled.items()
            if pattern.search(text_lower)
        ]

    # =========================================================================
    # STORY 20: Code Pattern Analysis
    # =========================================================================

    def analyze_code(self, text: str) -> Tuple[int, List[str]]:
        """Count fenced code blocks and guess the languages involved.

        Language detection is keyword-based and case-insensitive, so it can
        fire on prose that merely mentions e.g. 'import '; treat results as
        hints, not ground truth.
        """
        code_blocks = re.findall(r'```[\s\S]*?```', text)

        lang_patterns = {
            'python': r'(?:def |import |class |\.py\b)',
            'javascript': r'(?:const |let |function |\.js\b|=>)',
            'typescript': r'(?:interface |type |\.ts\b|: string|: number)',
            'sql': r'(?:SELECT |INSERT |UPDATE |CREATE TABLE)',
            'bash': r'(?:#!/|sudo |apt |npm |pip )',
        }
        languages = [
            lang for lang, pattern in lang_patterns.items()
            if re.search(pattern, text, re.IGNORECASE)
        ]

        return len(code_blocks), languages

    # =========================================================================
    # STORY 22: Quality Scoring
    # =========================================================================

    def calculate_quality_score(self, analysis: ConversationAnalysis) -> float:
        """Heuristic 0-10 quality/insight score for one conversation.

        Decisions (+2 each) and innovations (+3 each) dominate; topic
        breadth (+0.5), questions (+0.2, capped at 2) and tool mentions
        (+0.3) add smaller amounts; heavy frustration (>3 markers) costs 1.
        """
        score = 0.0

        score += len(analysis.decisions) * 2        # decisions are high value
        score += analysis.innovation_moments * 3    # innovations even more so
        score += len(analysis.topics) * 0.5         # multiple topics = depth
        score += min(analysis.question_count * 0.2, 2)   # curiosity, capped
        score += len(analysis.tools_mentioned) * 0.3     # practical work

        # Penalize high frustration without resolution.
        if analysis.frustration_moments > 3:
            score -= 1

        return min(10, max(0, score))  # clamp to 0-10

    # =========================================================================
    # MAIN PIPELINE
    # =========================================================================

    def analyze_conversation(self, conv: Dict[str, Any]) -> ConversationAnalysis:
        """Run every Story 11-22 analysis over one conversation dict.

        ``conv`` must carry a 'uuid' (KeyError otherwise); all other keys
        are optional. Human messages drive most analyses; entity, tool and
        code detection run over the full transcript (both senders).
        """
        uuid = conv['uuid']
        name = conv.get('name', 'Untitled')
        created_at = conv.get('created_at', '')
        updated_at = conv.get('updated_at', '')
        messages = conv.get('messages', [])

        human_text = ' '.join(
            m.get('text', '') for m in messages if m.get('sender') == 'human'
        )
        all_text = ' '.join(m.get('text', '') for m in messages)

        topics = self.classify_topics(human_text)
        intents = self.detect_intents(messages)
        entities = self.extract_entities(all_text)
        questions = self.extract_questions(human_text)
        emotions, sentiment = self.analyze_emotions(human_text)
        frustration_count, frustration_topics = self.detect_frustration(human_text, topics)
        innovations = self.detect_innovations(human_text)
        decisions = self.extract_decisions(human_text)
        tools = self.track_tools(all_text)
        code_count, languages = self.analyze_code(all_text)

        analysis = ConversationAnalysis(
            uuid=uuid,
            name=name,
            created_at=created_at,
            updated_at=updated_at,
            topics=topics,
            primary_topic=max(topics, key=topics.get) if topics else "",
            intents=intents,
            primary_intent=max(intents, key=intents.get) if intents else "",
            entities=entities,
            question_count=len(questions),
            key_questions=questions[:5],
            emotions=emotions,
            sentiment_score=sentiment,
            frustration_moments=frustration_count,
            frustration_topics=frustration_topics,
            innovation_moments=len(innovations),
            innovations=innovations,
            decisions=decisions,
            tools_mentioned=tools,
            code_snippets=code_count,
            languages_used=languages,
        )

        analysis.quality_score = self.calculate_quality_score(analysis)
        # Insight per message; max(1, ...) guards empty conversations.
        analysis.insight_density = analysis.quality_score / max(1, len(messages))

        return analysis

    def update_global_stats(self, analysis: ConversationAnalysis,
                            human_message_count: int = 0):
        """Fold one conversation's analysis into the running global totals.

        ``human_message_count`` feeds the 'total_human_messages' counter,
        which previously existed but was never incremented. Callers that
        omit it keep the old behavior (counter stays at 0).
        """
        self.global_stats['total_conversations'] += 1
        self.global_stats['total_human_messages'] += human_message_count
        self.global_stats['total_questions'] += analysis.question_count
        self.global_stats['total_decisions'] += len(analysis.decisions)
        self.global_stats['total_innovations'] += analysis.innovation_moments
        self.global_stats['total_frustrations'] += analysis.frustration_moments

        # Topic distribution counts conversations touching a topic (not
        # individual matches); intents/emotions count raw occurrences.
        for topic in analysis.topics:
            self.global_stats['topic_distribution'][topic] += 1

        for intent, count in analysis.intents.items():
            self.global_stats['intent_distribution'][intent] += count

        for emotion, count in analysis.emotions.items():
            self.global_stats['emotion_distribution'][emotion] += count

        for tool in analysis.tools_mentioned:
            self.global_stats['tool_distribution'][tool] += 1

    def run(self):
        """Execute the full content-analysis pipeline over the archive."""
        logger.info("=" * 60)
        logger.info("PHASE 2: CONTENT ANALYSIS")
        logger.info("Stories 11-22")
        logger.info("=" * 60)

        conv_count = 0

        with open(ARCHIVE_PATH, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines in the JSONL archive

                conv = json.loads(line)
                analysis = self.analyze_conversation(conv)
                self.analyses.append(analysis)

                # Count human messages here (the analysis record does not
                # carry them) so global stats stay accurate.
                human_count = sum(
                    1 for m in conv.get('messages', [])
                    if m.get('sender') == 'human'
                )
                self.update_global_stats(analysis, human_count)

                conv_count += 1
                if conv_count % 100 == 0:
                    logger.info(f"Analyzed {conv_count} conversations")

        logger.info(f"Completed analysis of {conv_count} conversations")

        # Generate outputs
        self.save_results()
        self.generate_report()

        return self.analyses

    def save_results(self):
        """Write all analysis artifacts into ANALYSIS_DIR (UTF-8)."""
        # Individual conversation analyses (one JSON object per line).
        with open(ANALYSIS_DIR / "conversation_analyses.jsonl", 'w', encoding='utf-8') as f:
            for analysis in self.analyses:
                f.write(json.dumps(analysis.to_dict()) + '\n')

        # Topic distribution
        with open(ANALYSIS_DIR / "topic_distribution.json", 'w', encoding='utf-8') as f:
            json.dump(dict(self.global_stats['topic_distribution']), f, indent=2)

        # Intent distribution
        with open(ANALYSIS_DIR / "intent_distribution.json", 'w', encoding='utf-8') as f:
            json.dump(dict(self.global_stats['intent_distribution']), f, indent=2)

        # Emotion distribution
        with open(ANALYSIS_DIR / "emotion_distribution.json", 'w', encoding='utf-8') as f:
            json.dump(dict(self.global_stats['emotion_distribution']), f, indent=2)

        # Tool usage
        with open(ANALYSIS_DIR / "tool_usage.json", 'w', encoding='utf-8') as f:
            json.dump(dict(self.global_stats['tool_distribution']), f, indent=2)

        # Top 50 conversations by quality score (summary records only).
        top_quality = sorted(self.analyses, key=lambda x: x.quality_score, reverse=True)[:50]
        with open(ANALYSIS_DIR / "top_quality_conversations.jsonl", 'w', encoding='utf-8') as f:
            for analysis in top_quality:
                f.write(json.dumps({
                    'uuid': analysis.uuid,
                    'name': analysis.name,
                    'quality_score': analysis.quality_score,
                    'primary_topic': analysis.primary_topic,
                    'decisions': len(analysis.decisions),
                    'innovations': analysis.innovation_moments,
                }) + '\n')

        # Every extracted decision, flattened across conversations.
        with open(ANALYSIS_DIR / "all_decisions.jsonl", 'w', encoding='utf-8') as f:
            for analysis in self.analyses:
                for decision in analysis.decisions:
                    f.write(json.dumps({
                        'conversation': analysis.uuid,
                        'date': analysis.created_at,
                        'decision': decision,
                        'topic': analysis.primary_topic,
                    }) + '\n')

        # Every innovation snippet, flattened across conversations.
        with open(ANALYSIS_DIR / "all_innovations.jsonl", 'w', encoding='utf-8') as f:
            for analysis in self.analyses:
                for innovation in analysis.innovations:
                    f.write(json.dumps({
                        'conversation': analysis.uuid,
                        'date': analysis.created_at,
                        'innovation': innovation,
                        'topic': analysis.primary_topic,
                    }) + '\n')

        logger.info(f"Results saved to {ANALYSIS_DIR}")

    def generate_report(self):
        """Log a human-readable summary of the global statistics."""
        stats = self.global_stats

        logger.info("\n" + "=" * 60)
        logger.info("CONTENT ANALYSIS REPORT")
        logger.info("=" * 60)

        logger.info(f"\n--- OVERVIEW ---")
        logger.info(f"Conversations analyzed: {stats['total_conversations']}")
        logger.info(f"Total questions asked: {stats['total_questions']}")
        logger.info(f"Total decisions made: {stats['total_decisions']}")
        logger.info(f"Innovation moments: {stats['total_innovations']}")
        logger.info(f"Frustration moments: {stats['total_frustrations']}")

        logger.info(f"\n--- Story 11: TOP TOPICS ---")
        for topic, count in stats['topic_distribution'].most_common(10):
            logger.info(f"  {topic}: {count} conversations")

        logger.info(f"\n--- Story 12: INTENT DISTRIBUTION ---")
        for intent, count in stats['intent_distribution'].most_common(10):
            logger.info(f"  {intent}: {count}")

        logger.info(f"\n--- Story 15: EMOTION DISTRIBUTION ---")
        for emotion, count in stats['emotion_distribution'].most_common(10):
            logger.info(f"  {emotion}: {count}")

        logger.info(f"\n--- Story 19: TOP TOOLS ---")
        for tool, count in stats['tool_distribution'].most_common(15):
            logger.info(f"  {tool}: {count}")

        logger.info(f"\n--- Story 22: TOP QUALITY CONVERSATIONS ---")
        top = sorted(self.analyses, key=lambda x: x.quality_score, reverse=True)[:5]
        for a in top:
            logger.info(f"  [{a.quality_score:.1f}] {a.name[:50]}")


# =============================================================================
# CLI
# =============================================================================

if __name__ == "__main__":
    # Script entry point: run the full Phase 2 pipeline.
    ContentAnalyzer().run()
