
"""
Agentic RAG Implementation
Uses an LLM agent to decide IF to retrieve and WHAT to retrieve.
ADAPTED: Uses Anthropic Tool Use.
"""

import json
from typing import List, Dict
from config import config
from naive_rag import NaiveRAG

class AgenticRAG(NaiveRAG):
    """Agentic RAG pipeline built on Anthropic tool use.

    Unlike naive RAG, the LLM first decides *whether* retrieval is needed
    and *what* to search for: general-knowledge questions are answered
    directly, while knowledge-base questions trigger one or more targeted
    searches whose results are deduplicated and fed back for generation.
    """

    # Anthropic tool schema the agent may invoke to hit the knowledge base.
    _SEARCH_TOOL = {
        "name": "search_knowledge_base",
        "description": "Search the Genesis knowledge base for information.",
        "input_schema": {
            "type": "object",
            "properties": {
                "search_queries": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of specific queries to search for.",
                }
            },
            "required": ["search_queries"],
        },
    }

    def reason_and_retrieve(self, query: str, per_query_limit: int = 3) -> List[Dict]:
        """Let the agent decide the retrieval strategy for *query*.

        Args:
            query: The user's question.
            per_query_limit: Maximum chunks retrieved per agent-issued
                search query (default 3, the original hard-coded value).

        Returns:
            Deduplicated context chunks, or an empty list when the agent
            chose to answer directly (no tool call was emitted).
        """
        response = self.anthropic.messages.create(
            model=config.llm.model,
            max_tokens=1024,
            messages=[
                {"role": "user", "content": query}
            ],
            system="You are a smart research assistant. Use the search tool if you need external information to answer the user. If you know the answer (e.g. general knowledge), you can answer directly.",
            tools=[self._SEARCH_TOOL],
        )

        # Anthropic tool use: tool calls arrive as content blocks with
        # type == 'tool_use'; absence means the model answered directly.
        tool_use = next((b for b in response.content if b.type == "tool_use"), None)
        if tool_use is None or tool_use.name != "search_knowledge_base":
            return []

        # Fall back to the raw user query if the model emitted an empty
        # query list (it signaled retrieval intent but gave us nothing).
        queries = tool_use.input.get("search_queries") or [query]

        all_context: List[Dict] = []
        for q in queries:
            all_context.extend(super().retrieve(q, limit=per_query_limit))

        # Deduplicate by chunk text while preserving first-seen order
        # (dicts keep insertion order in Python 3.7+).
        return list({c["content"]: c for c in all_context}.values())

    def query(self, query: str) -> Dict:
        """End-to-end agentic pipeline: retrieve-then-generate, or answer directly.

        Returns:
            Dict with keys ``query``, ``answer``, ``context`` and ``method``
            ("Agentic (Retrieved)" or "Agentic (Direct)").
        """
        context = self.reason_and_retrieve(query)

        if context:
            answer = self.generate(query, context)
            method = "Agentic (Retrieved)"
        else:
            response = self.anthropic.messages.create(
                model=config.llm.model,
                max_tokens=1024,
                messages=[{"role": "user", "content": query}],
            )
            # Join all text blocks instead of assuming content[0] is text —
            # a leading non-text block would make `.text` raise.
            answer = "".join(b.text for b in response.content if b.type == "text")
            method = "Agentic (Direct)"

        return {
            "query": query,
            "answer": answer,
            "context": context,
            "method": method,
        }

if __name__ == "__main__":
    # Smoke-test: construct the agentic pipeline (no query is run here).
    agentic_rag = AgenticRAG()