#!/usr/bin/env python3
"""
Deep diagnostic for talkingwidget.ai
- Uses fake mic so getUserMedia succeeds
- Spies on all Telnyx element events
- Checks exposed methods
- Checks mic/audio state after clicking
- Tests va:start -> Simli activation
"""

import asyncio
import json
import sys
from playwright.async_api import async_playwright

DIAGNOSTIC_REPORT = {}

async def run():
    """Run a full diagnostic pass against https://talkingwidget.ai.

    Launches headless Chromium with fake-microphone flags so getUserMedia
    succeeds, spies on the Telnyx custom element's events, attempts to start
    a call (click, then direct method calls), manually fires ``va:start``,
    and records every observation into the module-level DIAGNOSTIC_REPORT
    dict, which is also returned.
    """
    async with async_playwright() as p:
        print("[DIAG] Launching Chromium with fake mic flags...")
        browser = await p.chromium.launch(
            headless=True,
            args=[
                "--use-fake-ui-for-media-stream",
                "--use-fake-device-for-media-stream",
                "--allow-file-access-from-files",
                "--autoplay-policy=no-user-gesture-required",
                "--disable-web-security",
                "--allow-running-insecure-content",
                "--no-sandbox",
                "--disable-setuid-sandbox",
            ]
        )
        # NOTE(review): browser.close() at the end is skipped if an exception
        # escapes; acceptable here because exiting the async_playwright()
        # context stops the driver, which tears the browser down with it.

        context = await browser.new_context(
            permissions=["microphone", "camera"],
            # Grant mic permission to the origin
        )

        # Capture console logs and uncaught page errors for the final report.
        console_messages = []
        page = await context.new_page()
        page.on("console", lambda msg: console_messages.append(f"[{msg.type}] {msg.text}"))
        page.on("pageerror", lambda err: console_messages.append(f"[PAGEERROR] {err}"))

        # Grant mic permissions explicitly for the target origin.
        await context.grant_permissions(["microphone"], origin="https://talkingwidget.ai")

        # STEP 1: Navigate
        print("[DIAG] Step 1: Navigating to https://talkingwidget.ai ...")
        try:
            await page.goto("https://talkingwidget.ai", wait_until="domcontentloaded", timeout=30000)
            print(f"[DIAG] Navigated. Title: {await page.title()}")
        except Exception as e:
            # Navigation failure is recorded but not fatal: later steps still
            # report what (if anything) loaded.
            print(f"[DIAG] Navigation error: {e}")

        # STEP 2: Wait 5 seconds for load
        print("[DIAG] Step 2: Waiting 5 seconds for full load...")
        await asyncio.sleep(5)

        # Check page structure first
        print("[DIAG] Checking page structure...")
        page_info = await page.evaluate("""() => {
            var el = document.getElementById('ai-voice-engine');
            var overlay = document.getElementById('avatar-idle-overlay');
            return {
                hasVoiceEl: !!el,
                hasOverlay: !!overlay,
                telnyxElements: Array.from(document.querySelectorAll('telnyx-ai-agent')).map(e => ({id: e.id, tagName: e.tagName})),
                allIds: Array.from(document.querySelectorAll('[id]')).map(e => e.id).slice(0, 40),
                bodyText: document.body.innerText.slice(0, 300)
            };
        }""")
        print(f"[DIAG] Page structure: {json.dumps(page_info, indent=2)}")
        DIAGNOSTIC_REPORT['page_structure'] = page_info

        # STEP 3: Inject event spy
        # Document/window-level listeners are attached unconditionally so
        # bubbled or globally dispatched events are captured even when the
        # Telnyx element is missing (previously the no-element fallback
        # claimed to attach a doc-level spy but attached nothing).
        print("[DIAG] Step 3: Injecting event spy...")
        spy_result = await page.evaluate("""() => {
            window._telnyxEvents = [];
            var allEvents = ['connected','disconnected','call-started','call-ended',
                'telnyx:connected','telnyx:disconnected','telnyx:callStarted','telnyx:callEnded',
                'telnyx-call-connected','telnyx-call-started','telnyx-call-ended','telnyx-call-disconnected',
                'va:start','va:end','stateChange','readyStateChange','open','close',
                'message','error','callStarted','callEnded','change',
                'telnyx:agentSpeaking','telnyx:userSpeaking','telnyx:silenceDetected'];
            // Always spy at document level.
            allEvents.forEach(function(evt) {
                document.addEventListener(evt, function(e) {
                    window._telnyxEvents.push({event: 'DOC:'+evt, target: 'document', time: Date.now(), detail: e.detail ? JSON.stringify(e.detail) : null});
                    console.log('[SPY] document event:', evt, e.detail || '');
                });
            });
            // Always spy at window level too.
            allEvents.forEach(function(evt) {
                window.addEventListener(evt, function(e) {
                    window._telnyxEvents.push({event: 'WIN:'+evt, target: 'window', time: Date.now(), detail: e.detail ? JSON.stringify(e.detail) : null});
                    console.log('[SPY] window event:', evt, e.detail || '');
                });
            });
            // Element-level spy only when a target element exists.
            function spyOn(el) {
                allEvents.forEach(function(evt) {
                    el.addEventListener(evt, function(e) {
                        window._telnyxEvents.push({event: evt, target: 'element', time: Date.now(), detail: e.detail ? JSON.stringify(e.detail) : null});
                        console.log('[SPY] element event:', evt, e.detail || '');
                    });
                });
            }
            var el = document.getElementById('ai-voice-engine');
            if (el) {
                spyOn(el);
                return 'SPY ATTACHED to #ai-voice-engine';
            }
            // Try telnyx-ai-agent
            var el2 = document.querySelector('telnyx-ai-agent');
            if (el2) { spyOn(el2); return 'SPY ATTACHED to telnyx-ai-agent (no id)'; }
            return 'NO ELEMENT FOUND — spy on document/window only, attaching doc-level';
        }""")
        print(f"[DIAG] Spy injection: {spy_result}")
        DIAGNOSTIC_REPORT['spy_injection'] = spy_result

        # STEP 4: Wait 1 second
        await asyncio.sleep(1)

        # STEP 5: Check what methods the element exposes
        print("[DIAG] Step 5: Checking element methods...")
        methods_result = await page.evaluate("""() => {
            var el = document.getElementById('ai-voice-engine') || document.querySelector('telnyx-ai-agent');
            if (!el) return {error: 'no element found', allCustomElements: Array.from(document.querySelectorAll('*')).filter(e => e.tagName.includes('-')).map(e => ({tag: e.tagName, id: e.id})).slice(0,20)};
            var methods = [];
            var proto = Object.getPrototypeOf(el);
            // 'seen' pre-filters standard DOM methods so only custom ones are listed.
            var seen = new Set(['constructor','addEventListener','removeEventListener','dispatchEvent',
                'getAttribute','setAttribute','removeAttribute','hasAttribute',
                'appendChild','removeChild','replaceChild','insertBefore',
                'cloneNode','contains','matches','closest','querySelector','querySelectorAll',
                'getBoundingClientRect','focus','blur','click','scrollIntoView']);
            while (proto && proto !== HTMLElement.prototype) {
                Object.getOwnPropertyNames(proto).forEach(function(n) {
                    if (typeof el[n] === 'function' && !seen.has(n)) {
                        seen.add(n);
                        methods.push(n);
                    }
                });
                proto = Object.getPrototypeOf(proto);
            }
            // Also check own properties
            var attrs = [];
            for (var a of el.attributes) { attrs.push({name: a.name, value: a.value}); }
            // Check shadowRoot
            var shadowInfo = null;
            if (el.shadowRoot) {
                shadowInfo = {
                    childCount: el.shadowRoot.childNodes.length,
                    innerHTML: el.shadowRoot.innerHTML.slice(0, 500),
                    audioElements: el.shadowRoot.querySelectorAll('audio').length,
                    videoElements: el.shadowRoot.querySelectorAll('video').length,
                    buttonElements: el.shadowRoot.querySelectorAll('button').length
                };
            }
            return {
                methods: methods.slice(0, 50),
                attributes: attrs,
                shadowRoot: shadowInfo,
                tagName: el.tagName,
                id: el.id,
                className: el.className
            };
        }""")
        print(f"[DIAG] Element methods: {json.dumps(methods_result, indent=2)}")
        DIAGNOSTIC_REPORT['element_methods'] = methods_result

        # STEP 6: Click the hero mic button
        print("[DIAG] Step 6: Clicking hero mic button (#avatar-idle-overlay)...")
        try:
            overlay = await page.query_selector('#avatar-idle-overlay')
            if overlay:
                await overlay.click()
                print("[DIAG] Clicked #avatar-idle-overlay")
                DIAGNOSTIC_REPORT['click_attempt'] = 'clicked #avatar-idle-overlay'
            else:
                # Try alternatives
                print("[DIAG] #avatar-idle-overlay not found, trying alternatives...")
                alts = ['button[data-action]', '.mic-button', '.start-button', '[onclick]', 'button']
                clicked = False
                for sel in alts:
                    try:
                        el = await page.query_selector(sel)
                        if el:
                            await el.click()
                            print(f"[DIAG] Clicked alternative: {sel}")
                            DIAGNOSTIC_REPORT['click_attempt'] = f'clicked {sel}'
                            clicked = True
                            break
                    except Exception:
                        # Selector may fail or element may not be clickable;
                        # keep probing the remaining candidates.
                        pass
                if not clicked:
                    # Try JS click
                    js_click = await page.evaluate("""() => {
                        var candidates = ['#avatar-idle-overlay', '.mic-button', '.start-call', '[id*="overlay"]', '[id*="mic"]', '[id*="start"]'];
                        for (var sel of candidates) {
                            var el = document.querySelector(sel);
                            if (el) { el.click(); return 'JS clicked: ' + sel; }
                        }
                        // Try clicking the telnyx element itself
                        var telEl = document.getElementById('ai-voice-engine') || document.querySelector('telnyx-ai-agent');
                        if (telEl) { telEl.click(); return 'JS clicked telnyx element'; }
                        return 'nothing to click';
                    }""")
                    print(f"[DIAG] JS click result: {js_click}")
                    DIAGNOSTIC_REPORT['click_attempt'] = js_click
        except Exception as e:
            print(f"[DIAG] Click error: {e}")
            DIAGNOSTIC_REPORT['click_error'] = str(e)

        # Also try calling startCall method directly
        print("[DIAG] Trying to call startCall/connect directly on element...")
        direct_call = await page.evaluate("""() => {
            var el = document.getElementById('ai-voice-engine') || document.querySelector('telnyx-ai-agent');
            if (!el) return 'no element';
            // Try all common method names
            var tried = [];
            ['startCall','start','connect','call','begin','initialize','init'].forEach(function(m) {
                if (typeof el[m] === 'function') {
                    try { el[m](); tried.push(m + ':called'); } catch(e) { tried.push(m + ':error:' + e.message); }
                } else { tried.push(m + ':not-a-function'); }
            });
            return tried;
        }""")
        print(f"[DIAG] Direct method call results: {json.dumps(direct_call)}")
        DIAGNOSTIC_REPORT['direct_method_calls'] = direct_call

        # STEP 7: Wait 8 seconds for events
        print("[DIAG] Step 7: Waiting 8 seconds for events to fire...")
        await asyncio.sleep(8)

        # STEP 8: Check what events fired
        print("[DIAG] Step 8: Checking fired events...")
        events = await page.evaluate("() => window._telnyxEvents")
        print(f"[DIAG] Events fired: {json.dumps(events, indent=2)}")
        DIAGNOSTIC_REPORT['events_fired'] = events

        # STEP 9: Check mic/audio state
        print("[DIAG] Step 9: Checking mic/audio state...")
        audio_state = await page.evaluate("""() => {
            var el = document.getElementById('ai-voice-engine') || document.querySelector('telnyx-ai-agent');
            var shadowAudioCount = 0;
            var shadowVideoCount = 0;
            var shadowHTML = '';
            if (el && el.shadowRoot) {
                shadowAudioCount = el.shadowRoot.querySelectorAll('audio').length;
                shadowVideoCount = el.shadowRoot.querySelectorAll('video').length;
                shadowHTML = el.shadowRoot.innerHTML.slice(0, 1000);
            }
            // Check WebRTC state
            var peerConnections = [];
            // Enumerate attributes
            var elAttrs = {};
            if (el) {
                for (var a of el.attributes) { elAttrs[a.name] = a.value; }
            }
            return {
                audioCtxAvailable: !!(window.AudioContext || window.webkitAudioContext),
                mediaDevicesAvailable: !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia),
                voiceElReadyState: el ? el.getAttribute('ready-state') : null,
                voiceElCallState: el ? (el.getAttribute('call-status') || el.getAttribute('state') || el.getAttribute('call-state')) : null,
                voiceElAllAttrs: elAttrs,
                shadowRootAudioCount: shadowAudioCount,
                shadowRootVideoCount: shadowVideoCount,
                shadowHTML: shadowHTML,
                simliVidExists: !!document.getElementById('simli-vid'),
                simliVidDisplay: document.getElementById('simli-vid') ? document.getElementById('simli-vid').style.display : 'N/A',
                simliCanvasExists: !!document.querySelector('canvas[id*="simli"]'),
                allDocAudioElements: document.querySelectorAll('audio').length,
                allDocVideoElements: document.querySelectorAll('video').length,
                currentURL: window.location.href
            };
        }""")
        print(f"[DIAG] Audio/mic state: {json.dumps(audio_state, indent=2)}")
        DIAGNOSTIC_REPORT['audio_state'] = audio_state

        # STEP 10: Manually fire va:start and check Simli
        print("[DIAG] Step 10: Manually firing va:start...")
        await page.evaluate("""() => {
            document.dispatchEvent(new Event('va:start'));
            window.dispatchEvent(new Event('va:start'));
            // Also try CustomEvent with bubbles
            document.dispatchEvent(new CustomEvent('va:start', {bubbles: true, detail: {}}));
        }""")
        await asyncio.sleep(3)

        simli_state = await page.evaluate("""() => {
            var simliVid = document.getElementById('simli-vid');
            var simliCanvas = document.querySelector('canvas');
            var avatarEl = document.getElementById('avatar-speaking-overlay') || document.getElementById('simli-container');
            return {
                simliVidExists: !!simliVid,
                simliVidDisplay: simliVid ? simliVid.style.display : 'N/A',
                simliVidSrc: simliVid ? simliVid.src : 'N/A',
                simliCanvasExists: !!simliCanvas,
                avatarElExists: !!avatarEl,
                avatarElDisplay: avatarEl ? avatarEl.style.display : 'N/A',
                // Check if any element changed visibility
                allHiddenEls: Array.from(document.querySelectorAll('[style*="display: none"]')).map(e => e.id || e.className).slice(0,10),
                allVisibleEls: Array.from(document.querySelectorAll('[style*="display: block"], [style*="display: flex"]')).map(e => e.id || e.className).slice(0,10),
                // All events now
                allEvents: window._telnyxEvents
            };
        }""")
        print(f"[DIAG] Simli state after va:start: {json.dumps(simli_state, indent=2)}")
        DIAGNOSTIC_REPORT['simli_after_vastart'] = simli_state

        # Get full page HTML snapshot
        print("[DIAG] Capturing full page HTML snapshot (for structure analysis)...")
        html_snapshot = await page.evaluate("""() => document.documentElement.outerHTML.slice(0, 8000)""")
        DIAGNOSTIC_REPORT['html_snapshot'] = html_snapshot

        # Check all console messages
        DIAGNOSTIC_REPORT['console_messages'] = console_messages

        # Check WebRTC / getUserMedia status
        print("[DIAG] Checking getUserMedia capability...")
        media_state = await page.evaluate("""async () => {
            try {
                var stream = await navigator.mediaDevices.getUserMedia({audio: true});
                var tracks = stream.getAudioTracks();
                stream.getTracks().forEach(t => t.stop());
                return {success: true, tracks: tracks.length, label: tracks[0] ? tracks[0].label : 'none'};
            } catch(e) {
                return {success: false, error: e.message, name: e.name};
            }
        }""")
        print(f"[DIAG] getUserMedia test: {json.dumps(media_state)}")
        DIAGNOSTIC_REPORT['getUserMedia_test'] = media_state

        # Deep attribute inspection of telnyx element
        print("[DIAG] Deep attribute inspection...")
        deep_inspect = await page.evaluate("""() => {
            var el = document.getElementById('ai-voice-engine') || document.querySelector('telnyx-ai-agent');
            if (!el) return {error: 'no element'};
            // Get all properties including non-enumerable
            var props = {};
            var proto = el;
            while (proto && proto !== HTMLElement.prototype) {
                Object.getOwnPropertyNames(proto).forEach(function(name) {
                    try {
                        var val = el[name];
                        var type = typeof val;
                        if (type !== 'function' && name !== 'outerHTML' && name !== 'innerHTML') {
                            props[name] = {type: type, value: type === 'object' ? '[object]' : String(val).slice(0, 100)};
                        }
                    } catch(e) {}
                });
                proto = Object.getPrototypeOf(proto);
            }
            return {
                ownProps: props,
                tagName: el.tagName,
                id: el.id,
                // Check for internal stencil state
                hasStencilEl: !!(el.__stencilEl || el._instance || el.__stencilInstance),
                stencilKeys: el.__stencilEl ? Object.keys(el.__stencilEl).slice(0,20) : []
            };
        }""")
        print(f"[DIAG] Deep inspect: {json.dumps(deep_inspect, indent=2)}")
        DIAGNOSTIC_REPORT['deep_inspect'] = deep_inspect

        # Take screenshot for visual reference
        # NOTE(review): hard-coded machine-specific path — fails on any other
        # host; consider making it configurable. TODO confirm intended layout.
        await page.screenshot(path="/mnt/e/genesis-system/scripts/talkingwidget_diagnostic.png", full_page=False)
        print("[DIAG] Screenshot saved to scripts/talkingwidget_diagnostic.png")

        await browser.close()

    return DIAGNOSTIC_REPORT


if __name__ == "__main__":
    # Execute the diagnostic, then render the collected report to stdout
    # and persist it as JSON.
    diagnostic = asyncio.run(run())

    def banner(title):
        # Section header for the console report.
        print("\n--- " + title + " ---")

    print("\n" + "="*80)
    print("FINAL DIAGNOSTIC REPORT")
    print("="*80)

    banner("PAGE STRUCTURE")
    print(json.dumps(diagnostic.get('page_structure', {}), indent=2))

    banner("SPY INJECTION")
    print(diagnostic.get('spy_injection', 'N/A'))

    banner("ELEMENT METHODS")
    elem_info = diagnostic.get('element_methods', {})
    print(f"Methods exposed: {elem_info.get('methods', [])}")
    print(f"Attributes: {elem_info.get('attributes', [])}")
    shadow = elem_info.get('shadowRoot')
    if shadow:
        print(f"Shadow DOM: audio={shadow.get('audioElements')}, video={shadow.get('videoElements')}, buttons={shadow.get('buttonElements')}")
        print(f"Shadow innerHTML (first 500): {shadow.get('innerHTML', '')[:500]}")

    banner("DIRECT METHOD CALLS")
    print(json.dumps(diagnostic.get('direct_method_calls', []), indent=2))

    banner("EVENTS FIRED")
    fired = diagnostic.get('events_fired', [])
    if not fired:
        print("  NO EVENTS FIRED")
    else:
        for evt in fired:
            print(f"  EVENT: {evt}")

    banner("AUDIO/MIC STATE")
    audio_info = diagnostic.get('audio_state', {})
    # First group of fields all render the same way; drive them from a table.
    for label, key in (
        ("getUserMedia available", 'mediaDevicesAvailable'),
        ("Voice el ready state", 'voiceElReadyState'),
        ("Voice el call state", 'voiceElCallState'),
        ("Voice el all attrs", 'voiceElAllAttrs'),
        ("Shadow audio elements", 'shadowRootAudioCount'),
    ):
        print(f"  {label}: {audio_info.get(key)}")
    print(f"  Shadow HTML (first 1000): {audio_info.get('shadowHTML', '')[:1000]}")
    print(f"  Simli vid exists: {audio_info.get('simliVidExists')}")
    print(f"  Simli vid display: {audio_info.get('simliVidDisplay')}")

    banner("getUserMedia TEST")
    print(json.dumps(diagnostic.get('getUserMedia_test', {}), indent=2))

    banner("SIMLI AFTER va:start")
    simli_info = diagnostic.get('simli_after_vastart', {})
    print(f"  Simli vid exists: {simli_info.get('simliVidExists')}")
    print(f"  Simli vid display: {simli_info.get('simliVidDisplay')}")
    print(f"  Simli vid src: {simli_info.get('simliVidSrc')}")
    print(f"  All events after va:start: {simli_info.get('allEvents', [])}")

    banner("CONSOLE MESSAGES")
    # Cap at 50 lines to keep the console report readable.
    for console_line in diagnostic.get('console_messages', [])[:50]:
        print(f"  {console_line}")

    banner("DEEP INSPECT (non-function props)")
    deep = diagnostic.get('deep_inspect', {})
    print(f"  Has stencil: {deep.get('hasStencilEl')}")
    print(f"  Stencil keys: {deep.get('stencilKeys')}")
    # Drop boilerplate DOM node properties and undefined values before printing.
    boring = {'nodeType', 'nodeValue', 'nodeName', 'tagName', 'localName',
              'baseURI', 'isConnected', 'ownerDocument', 'textContent'}
    interesting = {}
    for prop, meta in deep.get('ownProps', {}).items():
        if prop not in boring and meta.get('type') not in ['undefined']:
            interesting[prop] = meta
    print(f"  Interesting properties: {json.dumps(interesting, indent=2)}")

    # Save full report
    with open("/mnt/e/genesis-system/scripts/talkingwidget_diagnostic_report.json", "w") as f:
        json.dump(diagnostic, f, indent=2)
    print("\n[DIAG] Full report saved to scripts/talkingwidget_diagnostic_report.json")