import os
import sys
import requests
import logging
import argparse

# Ensure necessary path for FastMCP is available if needed, though we'll use fallback for robustness
sys.path.append("/mnt/e/genesis-system/mcp-servers")

# Initialize at module level.
# The key is resolved lazily inside call_model(): first from the process
# environment, then from the secrets.env fallback file. None means
# "not yet looked up"; an empty string would mean "looked up and missing".
OPENROUTER_API_KEY = None 
OPENROUTER_URL = "https://openrouter.ai/api/v1/chat/completions"

# Friendly alias -> OpenRouter model identifier. call_model() treats an
# unknown key as a literal model id, so this map is a convenience, not a gate.
MODELS = {
    "kimi": "moonshotai/kimi-k2.5" # Ensure this is the updated K2.5 model
}

def call_model(model_key: str, prompt: str, system_prompt: str = "") -> str:
    """Send a chat-completion request to OpenRouter and return the reply text.

    Args:
        model_key: Key into MODELS, or a raw OpenRouter model id for models
            not listed there.
        prompt: The user message.
        system_prompt: Optional system message; sent even when empty to keep
            the request shape stable.

    Returns:
        The assistant message content on success, or a human-readable
        "Error: ..." string on any failure (missing key, HTTP error,
        malformed response). Callers rely on always getting a string.
    """
    global OPENROUTER_API_KEY

    # Resolve the API key at most once per process: environment first.
    if OPENROUTER_API_KEY is None:
        OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")

    if not OPENROUTER_API_KEY:
        # Fallback: read the key from the project's secrets file.
        secrets_path = "/mnt/e/genesis-system/config/secrets.env"
        if os.path.exists(secrets_path):
            with open(secrets_path, 'r') as f:
                for line in f:
                    if line.startswith("OPENROUTER_API_KEY="):
                        # split(..., 1) keeps the whole value even if the key
                        # itself contains '=' characters; also strip optional
                        # surrounding quotes common in .env files.
                        value = line.strip().split("=", 1)[1]
                        OPENROUTER_API_KEY = value.strip().strip('\'"')
                        break
        if not OPENROUTER_API_KEY:
            return "Error: OPENROUTER_API_KEY not found in environment or secrets.env."

    # Unknown keys fall through as literal OpenRouter model ids.
    model_id = MODELS.get(model_key, model_key)

    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json"
    }

    data = {
        "model": model_id,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ]
    }

    try:
        # Explicit timeout so a stalled connection cannot hang the script
        # forever; generation can be slow, hence the generous bound.
        response = requests.post(OPENROUTER_URL, headers=headers, json=data, timeout=300)
        if response.status_code != 200:
            # Print the body before raising: OpenRouter puts the useful
            # diagnostic (quota, bad model id, ...) in the response text.
            print(f"OpenRouter API Error: Status Code {response.status_code}")
            print(f"OpenRouter API Error Response: {response.text}")
            response.raise_for_status()

        result = response.json()
        return result['choices'][0]['message']['content']
    except Exception as e:
        # Boundary handler: this function's contract is "string in all cases",
        # so log and stringify rather than propagate.
        print(f"Error calling {model_key}: {e}")
        return f"Error: {str(e)}"

def _sanitize_url_name(url: str) -> str:
    """Turn a URL into a filesystem-safe base name (scheme stripped; '/' and '.' -> '_')."""
    return url.replace("https://", "").replace("http://", "").replace("/", "_").replace(".", "_")


def _parse_fenced_sections(text: str) -> dict:
    """Split a model response into its fenced code blocks.

    Recognizes ```html, ```css, ```javascript, and ```markdown fences; a bare
    ``` closes the current section. Lines outside any fence are discarded.

    Returns:
        Dict with keys "html", "css", "js", "markdown" mapping to the
        accumulated (newline-joined) contents; empty string if absent.
    """
    sections = {"html": "", "css": "", "js": "", "markdown": ""}
    current_section = None
    for line in text.splitlines():
        if line.startswith("```html"):
            current_section = "html"
        elif line.startswith("```css"):
            current_section = "css"
        elif line.startswith("```javascript"):
            current_section = "js"
        elif line.startswith("```markdown"):
            current_section = "markdown"
        elif line.startswith("```"):
            # Closing fence (or an unrecognized language) ends the section.
            current_section = None
        elif current_section is not None:
            sections[current_section] += line + "\n"
    return sections


def execute_clone_website_plan(target_url: str, output_dir: str = "cloned_websites"):
    """Ask the Kimi model to clone *target_url* and write the results to disk.

    Writes five files under *output_dir* (created if missing), all named after
    a sanitized form of the URL: .html, .css, .js, a _report.md, and the raw
    model response (_raw_response.txt) for debugging.

    Args:
        target_url: The website URL to clone.
        output_dir: Directory for the generated files.
    """
    print(f"--- 🚀 Initiating Kimi K2.5 Swarm for Website Cloning: {target_url} ---")

    # Create output directory if it doesn't exist.
    os.makedirs(output_dir, exist_ok=True)
    sanitized_url_name = _sanitize_url_name(target_url)
    output_file_html = os.path.join(output_dir, f"{sanitized_url_name}.html")
    output_file_css = os.path.join(output_dir, f"{sanitized_url_name}.css")
    output_file_js = os.path.join(output_dir, f"{sanitized_url_name}.js")
    output_file_report = os.path.join(output_dir, f"{sanitized_url_name}_report.md")

    # Craft a detailed prompt for Kimi K2.5 leveraging its vision stack and browser capabilities
    kimi_prompt = f"""
    You are Kimi K2.5, an advanced multimodal browser agent with a powerful vision stack and agent swarm capabilities.
    Your task is to "perfectly clone" the website located at {target_url}.

    Follow these steps in your agent swarm mode:
    1.  **Visit and Visually Analyze**: Access the URL: {target_url}. Use your vision stack to thoroughly analyze the layout, design, visual elements, and overall user interface. Identify all visual components, their styles, and their positions.
    2.  **Extract Content**: Extract all visible text content, image URLs, and other media assets.
    3.  **Recreate HTML Structure**: Generate the full HTML code that replicates the website's structure and content as accurately as possible. Include appropriate semantic HTML5 tags.
    4.  **Recreate CSS Styling**: Generate the CSS code to perfectly match the visual styling of the website. Ensure responsiveness if the original site is responsive. Embed or link any necessary fonts or external stylesheets if identified.
    5.  **Recreate Basic JavaScript (if any)**: Identify and replicate any *basic* client-side JavaScript that controls UI elements, animations, or simple interactions. Avoid complex backend logic.
    6.  **Organize Output**: Provide the HTML, CSS, and JavaScript as separate code blocks, clearly labeled.
    7.  **Generate a Cloning Report**: Provide a brief report (in Markdown) summarizing the cloning process, any challenges encountered, fidelity of the clone, and any limitations or elements that could not be perfectly replicated.

    Aim for pixel-perfect replication. Use your agent swarm to parallelize tasks like asset extraction and code generation for different sections of the page.
    """

    print(f"\n💻 Agent Kimi (Cloning {target_url})...")

    # The system prompt pins the output format so _parse_fenced_sections can
    # split it. NOTE: this is an f-string so {target_url} is substituted
    # (previously the literal placeholder was sent to the model).
    system_prompt = f"""
    You are an expert web developer and an autonomous agent. Provide high-fidelity, production-ready code.
    Your output must strictly adhere to the following format:

    ```html
    <!-- Replicated HTML for the website -->
    ```

    ```css
    /* Replicated CSS for the website */
    ```

    ```javascript
    // Replicated JavaScript for the website (if any basic client-side logic is found)
    ```

    ```markdown
    ## Website Cloning Report for {target_url}
    ...
    ```
    Ensure all sections are present, even if empty (e.g., if no JS is found).
    """

    full_response = call_model("kimi", kimi_prompt, system_prompt)

    # Save the raw response for debugging before any parsing can go wrong.
    raw_response_path = os.path.join(output_dir, f"{sanitized_url_name}_raw_response.txt")
    with open(raw_response_path, "w", encoding="utf-8") as f:
        f.write(full_response)
    print(f"✅ Raw Kimi response saved to {raw_response_path}")

    # Parse the response into individual sections and write each to its file.
    # encoding="utf-8" throughout: model output is routinely non-ASCII.
    sections = _parse_fenced_sections(full_response)
    outputs = (
        (output_file_html, sections["html"], "HTML"),
        (output_file_css, sections["css"], "CSS"),
        (output_file_js, sections["js"], "JavaScript"),
        (output_file_report, sections["markdown"], "Cloning Report"),
    )
    for path, content, label in outputs:
        with open(path, "w", encoding="utf-8") as f:
            f.write(content.strip())
        print(f"✅ {label} saved to {path}")

    print(f"--- Kimi K2.5 Swarm Cloning of {target_url} Complete ---")

if __name__ == "__main__":
    # CLI entry point: clone one website into the chosen output directory.
    cli = argparse.ArgumentParser(description="Kimi K2.5 Swarm for Website Cloning.")
    cli.add_argument("--url", required=True, help="The URL of the website to clone.")
    cli.add_argument("--output-dir", default="cloned_websites", help="Directory to save cloned files.")
    parsed = cli.parse_args()
    execute_clone_website_plan(parsed.url, parsed.output_dir)