---
layout: post
title: Speech-to-Text with Blazor Chat UI Component | Syncfusion
description: Check out and learn about Speech-to-Text configuration with the Blazor Chat UI component in Blazor Server App and Blazor WebAssembly App.
platform: Blazor
control: Chat UI
documentation: ug
---

# Speech-to-Text in Blazor Chat UI

The Syncfusion Blazor Chat UI component integrates `Speech-to-Text` functionality through the browser's [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API). This enables the conversion of spoken words into text using the device's microphone, allowing users to interact with the Chat UI through voice input.
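
Under the hood, the Web Speech API exposes the `SpeechRecognition` interface (available as `webkitSpeechRecognition` in Chromium-based browsers). The Syncfusion components manage this internally, but the following plain-JavaScript sketch illustrates the raw mechanism the integration builds on:

{% tabs %}
{% highlight javascript tabtitle="webspeech-sketch.js" %}

// Illustration only: the SpeechToText component wraps this API for you.
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new Recognition();
recognition.lang = 'en-US';         // recognition language
recognition.interimResults = true;  // emit partial results while speaking

recognition.onresult = (event) => {
    // Concatenate the transcript fragments recognized so far.
    const transcript = Array.from(event.results)
        .map((result) => result[0].transcript)
        .join('');
    console.log(transcript);
};

recognition.start(); // begins listening through the device microphone

{% endhighlight %}
{% endtabs %}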

## Prerequisites

Before integrating `Speech-to-Text`, ensure the following packages are installed:

* **Syncfusion Chat UI**: the [Syncfusion.Blazor.InteractiveChat](https://www.nuget.org/packages/Syncfusion.Blazor.InteractiveChat) package.

* **Syncfusion Speech-to-Text**: the [Syncfusion.Blazor.Inputs](https://www.nuget.org/packages/Syncfusion.Blazor.Inputs) package.
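
Both components also require the Syncfusion Blazor services to be registered, as covered in the getting-started guides. A minimal sketch, assuming the default `Program.cs` of a Blazor Web App (theme stylesheet and script references are omitted here):

{% tabs %}
{% highlight c# tabtitle="Program.cs" %}

using Syncfusion.Blazor;

var builder = WebApplication.CreateBuilder(args);

// Register the Syncfusion Blazor services used by SfChatUI and SfSpeechToText.
builder.Services.AddSyncfusionBlazor();

// ... remaining app configuration per the getting-started guide.

{% endhighlight %}
{% endtabs %}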

## Set Up the Chat UI Component

Follow the Syncfusion Chat UI [Getting Started](./getting-started) guide to configure and render the Chat UI component in your Blazor application.
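
For orientation, a stripped-down Chat UI declaration (assuming the service registration above) can be as small as the sketch below; the fully customized footer used in this topic follows later:

{% tabs %}
{% highlight c# tabtitle="razor" %}

@using Syncfusion.Blazor.InteractiveChat

<SfChatUI ID="chatUser" User="CurrentUserModel"></SfChatUI>

@code {
    // The user whose messages appear as outgoing in the Chat UI.
    private static UserModel CurrentUserModel = new UserModel() { ID = "User1", User = "Albert" };
}

{% endhighlight %}
{% endtabs %}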

## Configure Speech-to-Text

To enable Speech-to-Text functionality in the Blazor Chat UI component, update the `Home.razor` file to incorporate the Web Speech API.

The [SpeechToText](https://blazor.syncfusion.com/documentation/speech-to-text/getting-started-web-app) component listens to audio input from the device's microphone, transcribes spoken words into text, and updates the Chat UI's editable footer with the recognized text. Once the transcription appears in the footer, users can send it as a message to others.

### Configuration Options

* **[`Language`](https://help.syncfusion.com/cr/blazor/Syncfusion.Blazor.Inputs.SfSpeechToText.html#Syncfusion_Blazor_Inputs_SfSpeechToText_Language)**: Specifies the language for speech recognition. For example:

  * `en-US` for American English
  * `fr-FR` for French

* **[`AllowInterimResults`](https://help.syncfusion.com/cr/blazor/Syncfusion.Blazor.Inputs.SfSpeechToText.html#Syncfusion_Blazor_Inputs_SfSpeechToText_AllowInterimResults)**: Set to `true` to receive real-time (interim) recognition results, or `false` to receive only final results. Both options are applied in the short sketch below.

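A minimal sketch applying both options (the property names are taken from the API reference linked above, and the transcript event mirrors the full example that follows):

{% tabs %}
{% highlight c# tabtitle="razor" %}

@using Syncfusion.Blazor.Inputs

<SfSpeechToText Language="en-US" AllowInterimResults="true" TranscriptChanging="@OnTranscriptChange"></SfSpeechToText>

<p>@Transcript</p>

@code {
    private string Transcript = string.Empty;

    // Receives interim and final transcripts while recognition is active.
    private void OnTranscriptChange(TranscriptChangeEventArgs args)
    {
        Transcript = args.Transcript;
    }
}

{% endhighlight %}
{% endtabs %}
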
The `speechtotext.js` file handles operations related to the content of the editable footer, such as checking for meaningful input, clearing existing text, and updating the content with the transcribed value. Meanwhile, the `speechtotext.css` file styles the Chat UI layout and ensures the component remains responsive across different screen sizes and devices.

{% tabs %}
{% highlight c# tabtitle="razor" %}

@using Syncfusion.Blazor.InteractiveChat
@using Syncfusion.Blazor.Buttons
@using Syncfusion.Blazor.Inputs
@inject IJSRuntime JSRuntime

<div class="integration-speechtotext">
    <SfChatUI @ref="chatUI" ID="chatUser" AutoScrollToBottom=true User="CurrentUserModel" Messages="ChatUserMessages">
        <FooterTemplate>
            <div class="e-footer-wrapper">
                <div id="chatui-footer" class="content-editor" contenteditable="true" placeholder="Click to speak or start typing..." @oninput="@UpdateContent" @onkeydown="@OnKeyDown" @ref="@EditableDiv">@ChatUIFooterValue</div>
                <div class="option-container">
                    <SfSpeechToText ID="speechToText" TranscriptChanging="@OnTranscriptChange" SpeechRecognitionStopped="@HandleStopRecognition"
                                    CssClass="@($"e-flat {SpeechToTextCssClass}")" Disabled="@DisabledState"></SfSpeechToText>
                    <SfButton ID="chatui-sendButton" IconCss="e-assist-send e-icons" CssClass="@ButtonCssClass" @onclick="SendButtonClicked"></SfButton>
                </div>
            </div>
        </FooterTemplate>
    </SfChatUI>
</div>
@code {
    private SfChatUI chatUI;
    private static UserModel CurrentUserModel = new UserModel() { ID = "User1", User = "Albert" };
    private static UserModel MichaleUserModel = new UserModel() { ID = "User2", User = "Michale Suyama" };
    private string ChatUIFooterValue = string.Empty;
    private ElementReference EditableDiv;
    private string FooterContent = string.Empty;
    private string SpeechToTextCssClass = "visible";
    private string ButtonCssClass = string.Empty;
    private bool DisabledState = false;

    private List<ChatMessage> ChatUserMessages = new List<ChatMessage>()
    {
        new ChatMessage() { Text = "Hi Michale, are we on track for the deadline?", Author = CurrentUserModel },
        new ChatMessage() { Text = "Yes, the design phase is complete.", Author = MichaleUserModel },
        new ChatMessage() { Text = "I’ll review it and send feedback by today.", Author = CurrentUserModel }
    };

    // Pushes each interim/final transcript into the editable footer.
    private async Task OnTranscriptChange(TranscriptChangeEventArgs args)
    {
        ChatUIFooterValue = args.Transcript;
        await JSRuntime.InvokeVoidAsync("updateContentEditableDiv", EditableDiv, ChatUIFooterValue);
        await InvokeAsync(StateHasChanged);
    }

    // Reads the footer text after manual typing and toggles the footer actions.
    private async Task UpdateContent()
    {
        FooterContent = await JSRuntime.InvokeAsync<string>("isFooterContainsValue", EditableDiv);
        ToggleVisibility();
    }

    // When recognition stops, treat the last transcript as the footer content.
    private async Task HandleStopRecognition()
    {
        FooterContent = ChatUIFooterValue;
        ToggleVisibility();
        await InvokeAsync(StateHasChanged);
    }

    // Shows the send button when the footer has text; otherwise shows the microphone.
    private void ToggleVisibility()
    {
        ButtonCssClass = string.IsNullOrWhiteSpace(FooterContent) ? "" : "visible";
        SpeechToTextCssClass = string.IsNullOrWhiteSpace(FooterContent) ? "visible" : "";
    }

    private async Task SendButtonClicked()
    {
        chatUI.Messages.Add(new ChatMessage() { Text = FooterContent, Author = CurrentUserModel });
        ChatUIFooterValue = string.Empty;
        await JSRuntime.InvokeVoidAsync("emptyFooterValue", EditableDiv);
        await UpdateContent();
        ToggleVisibility();
    }

    // Sends the message when Enter is pressed without Shift.
    private async Task OnKeyDown(KeyboardEventArgs e)
    {
        if (e.Key == "Enter" && !e.ShiftKey)
        {
            await SendButtonClicked();
        }
    }
}

{% endhighlight %}

{% highlight javascript tabtitle="speechtotext.js" %}

// Checks if the content editable element contains meaningful text and cleans up leftover markup.
function isFooterContainsValue(elementref) {
    // When the editor has no visible text, clear a stray <br> or whitespace-only
    // markup so the CSS :empty placeholder can show again.
    if (elementref.innerText.trim() === '') {
        if (elementref.innerHTML === '<br>' || elementref.innerHTML.trim() === '') {
            elementref.innerHTML = '';
        }
    }
    return elementref.innerText || "";
}
// Clears the text content of a content editable element.
function emptyFooterValue(elementref) {
    if (elementref) {
        elementref.innerText = "";
    }
}
// Updates the text content of a content editable element with a specified value.
function updateContentEditableDiv(element, value) {
    if (element) {
        element.innerText = value;
    }
}

{% endhighlight %}

{% highlight css tabtitle="speechtotext.css" %}

.integration-speechtotext {
    height: 400px;
    width: 450px;
    margin: 0 auto;
}

.integration-speechtotext #chatui-sendButton {
    width: 40px;
    height: 40px;
    font-size: 15px;
    border: none;
    background: none;
    cursor: pointer;
}

/* Only one of the two footer actions is visible at a time. */
.integration-speechtotext #speechToText.visible,
.integration-speechtotext #chatui-sendButton.visible {
    display: inline-block;
}

.integration-speechtotext #speechToText,
.integration-speechtotext #chatui-sendButton {
    display: none;
}

/* Let the chat fill the viewport width on small screens. */
@media only screen and (max-width: 750px) {
    .integration-speechtotext {
        width: 100%;
    }
}

.integration-speechtotext .e-footer-wrapper {
    display: flex;
    border: 1px solid #c1c1c1;
    margin: 5px 5px 0 5px;
    border-radius: 10px;
    padding: 5px;
}

.integration-speechtotext .content-editor {
    width: 100%;
    overflow-y: auto;
    font-size: 14px;
    min-height: 20px;
    max-height: 150px;
    padding: 10px;
}

/* Placeholder text shown while the editable footer is empty. */
.integration-speechtotext .content-editor[contenteditable='true']:empty:before {
    content: attr(placeholder);
    color: #6b7280;
    font-style: italic;
}

.integration-speechtotext .option-container {
    align-self: flex-end;
}

{% endhighlight %}

{% endtabs %}
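
For the example above to work, the host page must load `speechtotext.js` and `speechtotext.css`. A sketch of the references, assuming both files are placed under `wwwroot` (the host page is `App.razor` in a Blazor Web App, or `wwwroot/index.html` in a Blazor WebAssembly app):

{% tabs %}
{% highlight html tabtitle="App.razor / index.html" %}

<head>
    <!-- ... existing head content ... -->
    <link href="speechtotext.css" rel="stylesheet" />
    <script src="speechtotext.js"></script>
</head>

{% endhighlight %}
{% endtabs %}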

## Error Handling

The `SpeechToText` component provides events to handle errors that may occur during speech recognition. For more information, refer to the [Error Handling](https://blazor.syncfusion.com/documentation/speech-to-text/speech-recognition#error-handling) section in the documentation.
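
As an illustrative sketch only, the wiring might look like the following; the `OnError` event name and the `SpeechToTextErrorEvent` argument type are assumptions here, so confirm the exact members in the linked error-handling section:

{% tabs %}
{% highlight c# tabtitle="razor" %}

@using Syncfusion.Blazor.Inputs

<SfSpeechToText OnError="@HandleError"></SfSpeechToText>

<span class="error-text">@ErrorMessage</span>

@code {
    private string ErrorMessage = string.Empty;

    // Hypothetical handler: surfaces recognition failures (for example, microphone
    // permission denied or no speech detected) instead of failing silently.
    private void HandleError(SpeechToTextErrorEvent args)
    {
        ErrorMessage = args.ErrorMessage;
    }
}

{% endhighlight %}
{% endtabs %}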

## Browser Compatibility

The `SpeechToText` component relies on the [Speech Recognition API](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition), which has limited browser support. Refer to the [Browser Compatibility](https://blazor.syncfusion.com/documentation/speech-to-text/speech-recognition#browser-support) section for detailed information.
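
Because support varies, it is worth detecting the underlying API before exposing voice input. A small plain-JavaScript check (the helper name is illustrative, not part of the Syncfusion API):

{% tabs %}
{% highlight javascript tabtitle="feature-detect.js" %}

// Returns true when the browser exposes the Speech Recognition API
// (standard or the webkit-prefixed variant used by Chromium browsers).
function isSpeechRecognitionSupported() {
    return 'SpeechRecognition' in window || 'webkitSpeechRecognition' in window;
}

{% endhighlight %}
{% endtabs %}

Such a helper can be invoked from Blazor via `IJSRuntime` (for example, `await JSRuntime.InvokeAsync<bool>("isSpeechRecognitionSupported")`) to disable the microphone button when recognition is unavailable.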