#!/usr/bin/env python3
"""
Diagnostic #3 — isolate the Simli 'Start error: undefined' and audio interception issue
"""
import asyncio
import json
from playwright.async_api import async_playwright

async def run():
    """Run the Simli/audio-bridge diagnostic against https://talkingwidget.ai.

    Drives a headless Chromium page through the call-start flow while
    capturing ``console.error`` payloads and unhandled promise rejections,
    then prints (in order): the captured Simli errors, unhandled rejections,
    shadow-DOM/document ``<audio>`` element state, the audio-bridge observer
    state, the call-UI element state, the Telnyx custom-element attributes,
    and the full chronological browser console log.
    """
    # Local import keeps this fix self-contained; the module already depends
    # on playwright at top level.
    from playwright.async_api import TimeoutError as PlaywrightTimeoutError

    async with async_playwright() as p:
        # Fake media devices + relaxed autoplay policy so the mic/audio flow
        # runs without real hardware or a user gesture.
        browser = await p.chromium.launch(
            headless=True,
            args=[
                "--use-fake-ui-for-media-stream",
                "--use-fake-device-for-media-stream",
                "--autoplay-policy=no-user-gesture-required",
                "--no-sandbox",
            ]
        )
        try:
            context = await browser.new_context(permissions=["microphone"])
            await context.grant_permissions(["microphone"], origin="https://talkingwidget.ai")

            # Collect every console message for the chronological dump at the end.
            console_messages = []
            page = await context.new_page()
            page.on("console", lambda msg: console_messages.append(f"[{msg.type.upper()}] {msg.text}"))

            print("[D3] Navigating...")
            await page.goto("https://talkingwidget.ai", wait_until="networkidle", timeout=45000)
            await asyncio.sleep(4)

            # Intercept the Simli bridge error more precisely
            print("[D3] Patching simli-bridge.js startSession to capture full error...")
            await page.evaluate("""() => {
                // Monkey-patch console.error to capture the full error object
                var origErr = console.error;
                window._simliErrors = [];
                console.error = function() {
                    var args = Array.from(arguments);
                    window._simliErrors.push(args.map(function(a) {
                        if (a instanceof Error) return {message: a.message, stack: a.stack, name: a.name};
                        if (typeof a === 'object') {
                            try { return JSON.stringify(a); } catch(e) { return String(a); }
                        }
                        return String(a);
                    }));
                    origErr.apply(console, arguments);
                };

                // Also capture unhandled promise rejections
                window._unhandledRejections = [];
                window.addEventListener('unhandledrejection', function(e) {
                    window._unhandledRejections.push({
                        reason: e.reason ? (e.reason.message || String(e.reason)) : 'unknown',
                        stack: e.reason ? e.reason.stack : null
                    });
                    console.log('[D3] Unhandled rejection:', e.reason);
                });
            }""")

            # Click to start call.  Guarded: if the overlay is missing or
            # obscured we still want the remaining diagnostics (and the
            # manual va:start dispatch below) to run instead of aborting
            # with an unhandled TimeoutError.
            print("[D3] Clicking overlay to trigger va:start flow...")
            try:
                await page.click('#avatar-idle-overlay', timeout=10000)
            except PlaywrightTimeoutError:
                print("[D3] WARNING: #avatar-idle-overlay not clickable; continuing")
            await asyncio.sleep(3)

            # Manually trigger va:start to also test simli
            print("[D3] Also manually dispatching va:start...")
            await page.evaluate("() => document.dispatchEvent(new Event('va:start'))")
            await asyncio.sleep(5)

            # Collect errors
            simli_errors = await page.evaluate("() => window._simliErrors || []")
            unhandled = await page.evaluate("() => window._unhandledRejections || []")

            print(f"[D3] Simli console errors: {json.dumps(simli_errors, indent=2)}")
            print(f"[D3] Unhandled rejections: {json.dumps(unhandled, indent=2)}")

            # Check if audio element appeared in shadow DOM during the call
            shadow_after = await page.evaluate("""() => {
                var el = document.getElementById('ai-voice-engine');
                if (!el || !el.shadowRoot) return {error: 'no shadow'};
                var sr = el.shadowRoot;
                var audios = Array.from(sr.querySelectorAll('audio'));
                return {
                    audioCount: audios.length,
                    audioDetails: audios.map(function(a) {
                        return {
                            id: a.id,
                            src: a.src,
                            srcObject: a.srcObject ? 'MediaStream' : null,
                            readyState: a.readyState,
                            paused: a.paused,
                            muted: a.muted,
                            autoplay: a.autoplay,
                            crossOrigin: a.crossOrigin,
                            _simliConnected: a._simliConnected || false
                        };
                    }),
                    // Count all audio in document too (simli creates its own hidden audio)
                    docAudioCount: document.querySelectorAll('audio').length,
                    docAudioDetails: Array.from(document.querySelectorAll('audio')).map(function(a) {
                        return {id: a.id, display: a.style.display, _simliConnected: a._simliConnected || false};
                    })
                };
            }""")
            print(f"[D3] Shadow/doc audio after click: {json.dumps(shadow_after, indent=2)}")

            # Check if MutationObserver caught the shadow audio element
            observer_check = await page.evaluate("""() => {
                // Check if the shadow audio element was caught by our audio bridge
                var el = document.getElementById('ai-voice-engine');
                if (!el || !el.shadowRoot) return {error: 'no shadow'};
                var sr = el.shadowRoot;
                var shadowAudios = sr.querySelectorAll('audio');
                var results = Array.from(shadowAudios).map(function(a) {
                    return {id: a.id || '(no id)', _connected: a._simliConnected, readyState: a.readyState};
                });
                return {
                    shadowAudioCount: shadowAudios.length,
                    shadowAudioDetails: results,
                    // Check if the observer is watching the shadow root
                    bridgeState: typeof window._simliGetBridgeState === 'function' ? window._simliGetBridgeState() : 'unknown'
                };
            }""")
            print(f"[D3] Observer/audio connection check: {json.dumps(observer_check, indent=2)}")

            # Check avatar-stage class (indicates call state)
            ui_state = await page.evaluate("""() => {
                var stage = document.getElementById('avatar-stage');
                var idleOverlay = document.getElementById('avatar-idle-overlay');
                var activeOverlay = document.getElementById('avatar-active-overlay');
                return {
                    stageClass: stage ? stage.className : null,
                    idleDisplay: idleOverlay ? window.getComputedStyle(idleOverlay).display : null,
                    activeDisplay: activeOverlay ? window.getComputedStyle(activeOverlay).display : null,
                    simliVidExists: !!document.getElementById('simli-vid'),
                    simliVidDisplay: document.getElementById('simli-vid') ? document.getElementById('simli-vid').style.display : null,
                    avatarVidSrc: document.getElementById('avatar-vid') ? document.getElementById('avatar-vid').src : null,
                    avatarVidDisplay: document.getElementById('avatar-vid') ? window.getComputedStyle(document.getElementById('avatar-vid')).display : null
                };
            }""")
            print(f"[D3] UI state during call: {json.dumps(ui_state, indent=2)}")

            # Now check what EVENTS fired from the Telnyx element during the actual call
            # by patching from the beginning of the call
            telnyx_events = await page.evaluate("""() => {
                var el = document.getElementById('ai-voice-engine');
                if (!el) return {error: 'no element'};
                // Check all attributes again
                var attrs = {};
                for (var a of el.attributes) { attrs[a.name] = a.value; }
                return {attrs: attrs};
            }""")
            print(f"[D3] Telnyx element attrs during call: {json.dumps(telnyx_events, indent=2)}")

            print("\n[D3] All console messages (chronological):")
            for msg in console_messages:
                print(f"  {msg}")
        finally:
            # Always release the browser, even when a step above raised —
            # previously an exception mid-run leaked the Chromium process.
            await browser.close()

def main() -> None:
    """Synchronous entry point: drive the async diagnostic to completion."""
    asyncio.run(run())


if __name__ == "__main__":
    main()
