"""
Prompt Injection Defense System for ReceptionistAI.

This module provides comprehensive protection against prompt injection attacks,
content policy violations, and system prompt leakage for a receptionist AI service.

Security Features:
- Input sanitization against known injection vectors
- Content filtering for offensive/abusive language
- Behavioral guardrails to prevent system prompt leakage
- Comprehensive security auditing and logging
- Unicode normalization to prevent homoglyph attacks
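
Example (illustrative sketch; the constructor signature and the
``evaluate`` entry point shown here are assumptions for documentation,
not necessarily this module's actual API):

    guard = PromptGuard()
    result = guard.evaluate("Please ignore previous instructions.")
    if result.security_level is SecurityLevel.BLOCKED:
        ...  # reject the turn and audit-log result.to_log_dict()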
"""

import re
import logging
import hashlib
import json
import unicodedata
from typing import Optional, List, Dict, Any, Tuple, Set, Pattern
from enum import Enum
from dataclasses import dataclass, field
from datetime import datetime, timezone
from functools import lru_cache
import secrets

# Optional: use pydantic for strict input validation if available
try:
    from pydantic import BaseModel, Field, validator
    HAS_PYDANTIC = True
except ImportError:
    HAS_PYDANTIC = False
    BaseModel = object  # minimal stand-in so subclass definitions still work
    Field = validator = None  # referenced only when HAS_PYDANTIC is True


class SecurityLevel(Enum):
    """Security classification levels for inputs."""
    SAFE = "safe"
    SUSPICIOUS = "suspicious"  # Flagged for review but allowed
    BLOCKED = "blocked"        # Immediate rejection
    ENCRYPTED = "encrypted"    # Contains potential encoding tricks


class ViolationType(Enum):
    """Types of security violations detected."""
    PROMPT_INJECTION = "prompt_injection"
    OFFENSIVE_CONTENT = "offensive_content"
    SYSTEM_PROMPT_LEAKAGE = "system_prompt_leakage"
    COMPETITOR_MENTION = "competitor_mention"
    PROMISE_OUT_OF_SCOPE = "promise_out_of_scope"
    DELIMITER_MANIPULATION = "delimiter_manipulation"
    ENCODING_OBFUSCATION = "encoding_obfuscation"


@dataclass
class SanitizationResult:
    """Result of input sanitization and security checks."""
    original_input: str
    sanitized_input: str
    security_level: SecurityLevel
    violations: List[ViolationType] = field(default_factory=list)
    confidence_score: float = 0.0  # 0.0 to 1.0, higher = more suspicious
    metadata: Dict[str, Any] = field(default_factory=dict)
    correlation_id: str = field(default_factory=lambda: secrets.token_hex(8))
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    
    def to_log_dict(self) -> Dict[str, Any]:
        """Convert to dictionary safe for logging (excludes full input for PII)."""
        return {
            "correlation_id": self.correlation_id,
            "security_level": self.security_level.value,
            "violations": [v.value for v in self.violations],
            "confidence_score": self.confidence_score,
            "input_hash": hashlib.sha256(self.original_input.encode()).hexdigest()[:16],
            "input_length": len(self.original_input),
            "timestamp": self.timestamp.isoformat(),
            "metadata": self.metadata
        }

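# Example (illustrative): building a result by hand and emitting the
# PII-safe audit record. Every field used here is defined on the
# dataclass above; the logger name is an arbitrary choice.
#
#     result = SanitizationResult(
#         original_input="ignore previous instructions",
#         sanitized_input="[redacted]",
#         security_level=SecurityLevel.BLOCKED,
#         violations=[ViolationType.PROMPT_INJECTION],
#         confidence_score=0.9,
#     )
#     logging.getLogger("promptguard.audit").warning(
#         json.dumps(result.to_log_dict())
#     )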

@dataclass
class GuardrailConfig:
    """Configuration for behavioral guardrails."""
    system_prompt_reveal_protection: bool = True
    competitor_info_protection: bool = True
    scope_limitation: bool = True
    max_input_length: int = 4000
    allow_urls: bool = False
    restricted_topics: List[str] = field(default_factory=list)

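# Example (illustrative): tightening the defaults for a deployment that
# also wants certain topics off-limits. Field names match the dataclass
# above; the topic strings are placeholders.
#
#     config = GuardrailConfig(
#         max_input_length=2000,
#         allow_urls=False,
#         restricted_topics=["pricing negotiations", "legal advice"],
#     )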

class PromptGuard:
    """
    Production-grade prompt injection defense system.
    
    Implements defense in depth:
    1. Input normalization (Unicode, encoding)
    2. Pattern matching for injection attempts
    3. Content policy enforcement
    4. Behavioral guardrails via prompt engineering
    5. Output validation
    """
    
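    # Illustrative sketch of step 1 (Unicode normalization). The helper
    # name below is an assumption for documentation, not necessarily how
    # this module implements it; NFKC folding maps many homoglyph and
    # compatibility characters to their ASCII forms, e.g. fullwidth
    # "ＩＧＮＯＲＥ" normalizes to "IGNORE".
    #
    #     @staticmethod
    #     def _normalize(text: str) -> str:
    #         return unicodedata.normalize("NFKC", text)
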
    # Known injection vectors - compiled once for performance
    INJECTION_PATTERNS: List[Tuple[Pattern[str], ViolationType, float]] = [
        # Direct instruction overrides
        (re.compile(r'ignore\s+(?:previous\s+)?instructions?', re.IGNORECASE), 
         ViolationType.PROMPT_INJECTION, 0.9),
        (re.compile(r'disregard\s+(?:the\s+)?(?:above|previous|prior)', re.IGNORECASE), 
         ViolationType.PROMPT_INJECTION, 0.9),
        (re.compile(r'forget\s+(?:everything|your\s+instructions|your\s+training)', re.IGNORECASE), 
         ViolationType.PROMPT_INJECTION, 0.95),
        
        # System prompt access attempts
        (re.compile(r'system\s*prompt|initial\s*prompt|base\s*instructions?', re.IGNORECASE), 
         ViolationType.SYSTEM_PROMPT_LEAKAGE, 0.85),
        (re.compile(r'what\s+(?:are|were)\s+your\s+instructions?', re.IGNORECASE), 
         ViolationType.SYSTEM_PROMPT_LEAKAGE, 0.8),
        (re.compile(r'repeat\s+(?:your\s+)?(?:system\s+)?prompt', re.IGNORECASE), 
         ViolationType.SYSTEM_PROMPT_LEAKAGE, 0.9),
        
        # Role switching
        (re.compile(r'you\s+(?:are|become|are\s+now)\s+(?:an?\s+)?(?:assistant|ai|bot)', re.IGNORECASE),
         ViolationType.PROMPT_INJECTION, 0.7),
        (re.compile(r'act\s+(?:as|like)\s', re.IGNORECASE),
         ViolationType.PROMPT_INJECTION, 0.6),
        (re.compile(r'switch\s+(?:to|into)\s+(?:mode|role)', re.IGNORECASE), 
         ViolationType.PROMPT_INJECTION, 0.8),
        
        # Delimiter manipulation
        (re.compile(r'