<!doctype html>
<html lang="en-US">
  <head>
    <link href="/assets/index.css" rel="stylesheet" type="text/css" />
    <script crossorigin="anonymous" src="https://unpkg.com/[email protected]/umd/react.development.js"></script>
    <script crossorigin="anonymous" src="https://unpkg.com/[email protected]/umd/react-dom.development.js"></script>
    <script crossorigin="anonymous" src="/test-harness.js"></script>
    <script crossorigin="anonymous" src="/test-page-object.js"></script>
    <script crossorigin="anonymous" src="/__dist__/webchat-es5.js"></script>
  </head>
  <body>
    <main id="webchat"></main>
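    <!--
      The import map below resolves bare module specifiers used by the inline test module to
      CDN-hosted ESM builds, so the test-only dependencies (wait-for, jest-mock, and the
      react-dictate-button internals used to emulate Web Speech API events) can be imported
      without a bundling step.
    -->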
    <script type="importmap">
      {
        "imports": {
          "@testduet/wait-for": "https://unpkg.com/@testduet/wait-for@main/dist/wait-for.mjs",
          "jest-mock": "https://esm.sh/jest-mock",
          "react-dictate-button/internal": "https://unpkg.com/react-dictate-button@main/dist/react-dictate-button.internal.mjs"
        }
      }
    </script>
    <script type="module">
      import { waitFor } from '@testduet/wait-for';
      import { fn, spyOn } from 'jest-mock';
      import {
        SpeechGrammarList,
        SpeechRecognition,
        SpeechRecognitionAlternative,
        SpeechRecognitionErrorEvent,
        SpeechRecognitionEvent,
        SpeechRecognitionResult,
        SpeechRecognitionResultList
      } from 'react-dictate-button/internal';
      import { SpeechSynthesis, SpeechSynthesisEvent, SpeechSynthesisUtterance } from '../speech/js/index.js';
      import renderHook from './private/renderHook.js';

      const {
        React: { createElement },
        ReactDOM: { render },
        testHelpers: { createDirectLineEmulator },
        WebChat: {
          Components: { BasicWebChat, Composer },
          hooks: { useDictateState },
          renderWebChat,
          testIds
        }
      } = window;

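      // The assertions below compare `useDictateState()` against raw numbers. As an assumption
      // (based on Web Chat's DictateState constants, not asserted by this test), the mapping is:
      // 0 = IDLE, 1 = WILL_START, 2 = STARTING, 3 = DICTATING, 4 = STOPPING.
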
      run(async function () {
        const speechSynthesis = new SpeechSynthesis();
        const ponyfill = {
          SpeechGrammarList,
          SpeechRecognition: fn().mockImplementation(() => {
            const speechRecognition = new SpeechRecognition();

            spyOn(speechRecognition, 'abort');
            spyOn(speechRecognition, 'start');

            return speechRecognition;
          }),
          speechSynthesis,
          SpeechSynthesisUtterance
        };

        spyOn(speechSynthesis, 'speak');

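        // Every SpeechRecognition handed to Web Chat by the ponyfill is a fresh instance with
        // spied `abort`/`start` methods, so the test can later pull each instance back out of
        // `ponyfill.SpeechRecognition.mock.results` and assert how dictation drove it. Spying on
        // `speechSynthesis.speak` likewise lets the test capture each utterance and complete it
        // manually by dispatching a synthetic 'end' event.
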
        const { directLine, store } = createDirectLineEmulator();
        const WebChatWrapper = ({ children }) =>
          createElement(
            Composer,
            { directLine, store, webSpeechPonyfillFactory: () => ponyfill },
            createElement(BasicWebChat),
            children
          );

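        // The wrapper mounts BasicWebChat and the hook-under-test under the same <Composer>, so
        // the rendered UI (microphone button, transcript) and the `useDictateState()` value
        // observed through `renderHook` share one store and one web speech ponyfill.
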
        // WHEN: Render initially.
        const renderResult = renderHook(() => useDictateState()[0], {
          legacyRoot: true,
          wrapper: WebChatWrapper
        });

        await pageConditions.uiConnected();

        // THEN: `useDictateState` should return IDLE.
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 0)); // IDLE

        // WHEN: Microphone button is clicked and priming user gesture is done.
        await pageObjects.clickMicrophoneButton();

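        // The first microphone click makes Web Chat speak a priming utterance (presumably to
        // satisfy the browser's user-gesture requirement for speech synthesis), so the test waits
        // for `speak()` and dispatches 'end' on that utterance before dictation can start.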
        await waitFor(() => expect(speechSynthesis.speak).toHaveBeenCalledTimes(1));
        speechSynthesis.speak.mock.calls[0][0].dispatchEvent(
          new SpeechSynthesisEvent('end', { utterance: speechSynthesis.speak.mock.calls[0][0] })
        );

        // THEN: `useDictateState` should return STARTING.
        renderResult.rerender();
        // Dictate state "1" (WILL_START) is reserved for automatically turning the microphone back on after the current synthesis completes.
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 2));

        // THEN: Should construct SpeechRecognition().
        expect(ponyfill.SpeechRecognition).toHaveBeenCalledTimes(1);

        const { value: speechRecognition1 } = ponyfill.SpeechRecognition.mock.results[0];

        // THEN: Should call SpeechRecognition.start().
        expect(speechRecognition1.start).toHaveBeenCalledTimes(1);

        // WHEN: Recognition started.
        speechRecognition1.dispatchEvent(new Event('start'));
        speechRecognition1.dispatchEvent(new Event('audiostart'));
        speechRecognition1.dispatchEvent(new Event('soundstart'));
        speechRecognition1.dispatchEvent(new Event('speechstart'));

        // WHEN: Recognized interim result of "Hello".
        speechRecognition1.dispatchEvent(
          new SpeechRecognitionEvent('result', {
            results: new SpeechRecognitionResultList(
              new SpeechRecognitionResult(new SpeechRecognitionAlternative(0, 'Hello'))
            )
          })
        );

        // THEN: `useDictateState` should return DICTATING.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 3));

        // WHEN: Recognized finalized result of "Hello, World!" and ended recognition.
        await (
          await directLine.actPostActivity(() =>
            speechRecognition1.dispatchEvent(
              new SpeechRecognitionEvent('result', {
                results: new SpeechRecognitionResultList(
                  SpeechRecognitionResult.fromFinalized(new SpeechRecognitionAlternative(0.9, 'Hello, World!'))
                )
              })
            )
          )
        ).resolveAll();
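
        // `actPostActivity` captures the activity Web Chat posts while the callback runs, and
        // `resolveAll()` then acknowledges (echoes back) that activity so the transcript updates.
        // This describes the assumed behavior of the `createDirectLineEmulator` test helper.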

        speechRecognition1.dispatchEvent(new Event('speechend'));
        speechRecognition1.dispatchEvent(new Event('soundend'));
        speechRecognition1.dispatchEvent(new Event('audioend'));
        speechRecognition1.dispatchEvent(new Event('end'));

        // THEN: `useDictateState` should return IDLE.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 0));

        // WHEN: Bot replied.
        await directLine.emulateIncomingActivity({
          inputHint: 'expectingInput', // "expectingInput" should turn the microphone back on after synthesis completes.
          text: 'Aloha!',
          type: 'message'
        });
        await pageConditions.numActivitiesShown(2);

        // THEN: Should call SpeechSynthesis.speak() again.
        await waitFor(() => expect(speechSynthesis.speak).toHaveBeenCalledTimes(2));

        // THEN: Should start synthesizing "Aloha!".
        expect(speechSynthesis.speak).toHaveBeenLastCalledWith(expect.any(SpeechSynthesisUtterance));
        expect(speechSynthesis.speak).toHaveBeenLastCalledWith(expect.objectContaining({ text: 'Aloha!' }));

        // THEN: `useDictateState` should return WILL_START.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 1));

        // WHEN: Synthesis completed.
        speechSynthesis.speak.mock.calls[1][0].dispatchEvent(
          new SpeechSynthesisEvent('end', { utterance: speechSynthesis.speak.mock.calls[1][0] })
        );

        // THEN: `useDictateState` should return STARTING.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 2));

        // THEN: Should construct SpeechRecognition() again.
        const { value: speechRecognition2 } = ponyfill.SpeechRecognition.mock.results[1];

        // THEN: Should call SpeechRecognition.start().
        expect(speechRecognition2.start).toHaveBeenCalledTimes(1);

        // WHEN: Recognition started.
        speechRecognition2.dispatchEvent(new Event('start'));
        speechRecognition2.dispatchEvent(new Event('audiostart'));
        speechRecognition2.dispatchEvent(new Event('soundstart'));
        speechRecognition2.dispatchEvent(new Event('speechstart'));

        // WHEN: Recognized interim result of "Good".
        speechRecognition2.dispatchEvent(
          new SpeechRecognitionEvent('result', {
            results: new SpeechRecognitionResultList(
              new SpeechRecognitionResult(new SpeechRecognitionAlternative(0, 'Good'))
            )
          })
        );

        // THEN: `useDictateState` should return DICTATING.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 3));

        // WHEN: Click on microphone button.
        await pageObjects.clickMicrophoneButton();

        // THEN: `useDictateState` should return STOPPING.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 4));

        // WHEN: Recognition ended.
        speechRecognition2.dispatchEvent(new Event('speechend'));
        speechRecognition2.dispatchEvent(new Event('soundend'));
        speechRecognition2.dispatchEvent(new Event('audioend'));
        speechRecognition2.dispatchEvent(new Event('end'));

        // THEN: `useDictateState` should return IDLE.
        renderResult.rerender();
        await waitFor(() => expect(renderResult).toHaveProperty('result.current', 0));
      });
    </script>
  </body>
</html>