diff --git a/AI/app.js b/AI/app.js
index 3798f1a..74c5b39 100644
--- a/AI/app.js
+++ b/AI/app.js
@@ -458,6 +458,17 @@ async function setMutedState(muted, { announce = false } = {}) {
   if (!recognition) {
+    if (!muted && !hasMicPermission) {
+      hasMicPermission = await requestMicPermission();
+      if (!hasMicPermission) {
+        updateMuteIndicator();
+        if (announce) {
+          speak('Microphone permission is required to unmute.');
+        }
+        return false;
+      }
+    }
+
     isMuted = muted;
     updateMuteIndicator();
     if (muted) {
diff --git a/tests/test_voice_ui.py b/tests/test_voice_ui.py
index b609bb7..9e56699 100644
--- a/tests/test_voice_ui.py
+++ b/tests/test_voice_ui.py
@@ -155,6 +155,94 @@ class TestRecognition {
 """
 
 
+STUB_SCRIPT_NO_RECOGNITION = """
+(() => {
+  const state = {
+    speakCalls: [],
+    recognitionStartCalls: 0,
+    recognitionStopCalls: 0,
+    getUserMediaCalls: 0
+  };
+
+  Object.defineProperty(window, "__testState", {
+    value: state,
+    configurable: false,
+    writable: false
+  });
+
+  delete window.SpeechRecognition;
+  delete window.webkitSpeechRecognition;
+
+  const synth = window.speechSynthesis;
+  if (synth) {
+    try {
+      synth.getVoices = () => [];
+    } catch (error) {
+      console.warn("Unable to override getVoices", error);
+    }
+
+    try {
+      Object.defineProperty(synth, "speaking", {
+        configurable: true,
+        get() {
+          return false;
+        }
+      });
+    } catch (error) {
+      console.warn("Unable to redefine speaking property", error);
+    }
+
+    const stubbedSpeak = (utterance) => {
+      const spoken =
+        typeof utterance === "string"
+          ? utterance
+          : typeof utterance?.text === "string"
+            ? utterance.text
+            : "";
+      state.speakCalls.push(spoken);
+    };
+
+    try {
+      synth.speak = stubbedSpeak;
+    } catch (error) {
+      try {
+        Object.defineProperty(synth, "speak", {
+          configurable: true,
+          writable: true,
+          value: stubbedSpeak
+        });
+      } catch (defineError) {
+        console.warn("Unable to override speechSynthesis.speak", defineError);
+      }
+    }
+
+    try {
+      synth.cancel = () => {};
+    } catch (error) {
+      console.warn("Unable to override speechSynthesis.cancel", error);
+    }
+  }
+
+  if (!navigator.mediaDevices) {
+    navigator.mediaDevices = {};
+  }
+
+  navigator.mediaDevices.getUserMedia = function () {
+    state.getUserMediaCalls += 1;
+    return Promise.resolve({
+      getTracks() {
+        return [
+          {
+            stop() {}
+          }
+        ];
+      }
+    });
+  };
+})();
+"""
+
+
 def launch_chromium(playwright):
     try:
         return playwright.chromium.launch()
@@ -179,6 +267,20 @@ def loaded_page():
         browser.close()
 
 
+@pytest.fixture
+def page_without_recognition():
+    with sync_playwright() as playwright:
+        browser = launch_chromium(playwright)
+        context = browser.new_context(ignore_https_errors=True)
+        page = context.new_page()
+        page.add_init_script(STUB_SCRIPT_NO_RECOGNITION)
+        page.goto(SITE_URL, wait_until="load")
+        page.wait_for_selector("#mute-indicator")
+        yield page
+        context.close()
+        browser.close()
+
+
 def test_unmute_flow_triggers_recognition_and_updates_indicator(loaded_page):
     page = loaded_page
     page.evaluate("window.__testState.recognitionStartCalls = 0")
@@ -205,6 +307,17 @@ def test_unmute_requests_microphone_permission_once(loaded_page):
     page.wait_for_function("() => window.__testState.getUserMediaCalls === 1")
 
 
+def test_mute_button_requests_permission_without_recognition(page_without_recognition):
+    page = page_without_recognition
+    page.evaluate("window.__testState.getUserMediaCalls = 0")
+    page.click("#mute-indicator")
+    page.wait_for_function("() => window.__testState.getUserMediaCalls === 1")
+
+    indicator_text = page.text_content("#mute-indicator .indicator-text")
+    assert indicator_text is not None
+    assert "Voice recognition unavailable" in indicator_text
+
+
 def test_voice_prompts_announce_theme_changes(loaded_page):
     page = loaded_page
     page.evaluate("window.__testState.speakCalls = []")