     <!-- Voice playback -->
     <span v-if="tts">
       <el-tooltip
+        v-if="audioManage?.isPlaying()"
         effect="dark"
-        :content="$t('chat.operation.play')"
+        :content="$t('chat.operation.pause')"
         placement="top"
-        v-if="!audioPlayerStatus"
       >
+        <el-button
+          type="primary"
+          text
+          :disabled="!data?.write_ed"
+          @click="audioManage?.pause(true)"
+        >
+          <AppIcon iconName="app-video-pause"></AppIcon>
+        </el-button>
+      </el-tooltip>
+      <el-tooltip effect="dark" :content="$t('chat.operation.play')" placement="top" v-else>
         <el-button
           text
           :disabled="!data?.write_ed"
           <AppIcon iconName="app-video-play"></AppIcon>
         </el-button>
       </el-tooltip>
-      <el-tooltip v-else effect="dark" :content="$t('chat.operation.pause')" placement="top">
-        <el-button
-          type="primary"
-          text
-          :disabled="!data?.write_ed"
-          @click="audioManage?.pause.bind(audioManage)"
-        >
-          <AppIcon iconName="app-video-pause"></AppIcon>
-        </el-button>
-      </el-tooltip>
+
       <el-divider direction="vertical" />
     </span>
     <span v-if="type == 'ai-chat' || type == 'log'">
   </div>
 </template>
 <script setup lang="ts">
-import { onMounted, ref, computed } from 'vue'
+import { nextTick, onMounted, ref } from 'vue'
 import { useRoute } from 'vue-router'
 import { copyClick } from '@/utils/clipboard'
 import applicationApi from '@/api/application'
 import { datetimeFormat } from '@/utils/time'
 import { MsgError } from '@/utils/message'
-import { t } from '@/locales'
 import bus from '@/bus'

 const route = useRoute()
 const {
@@ -138,12 +139,9 @@ const audioCiontainer = ref<HTMLDivElement>()
 const audioPlayerStatus = ref(false)
 const buttonData = ref(props.data)
 const loading = ref(false)
-const utterance = ref<SpeechSynthesisUtterance | null>(null)
+
 const audioList = ref<string[]>([])
-const currentAudioIndex = ref(0)
-const demo = computed(() => {
-  return props.data.answer_text
-})
+
 function regeneration() {
   emit('regeneration')
 }
@@ -247,14 +245,6 @@ enum AudioStatus {
    * Playing
    */
   PLAY_INT = 'PLAY_INT',
-  /**
-   * Manually paused
-   */
-  PAUSE = 'PAUSE',
-  /**
-   * Waiting for the streaming output to produce the next segment
-   */
-  WAIT = 'WAIT',
   /**
    * Just mounted
    */
@@ -263,7 +253,9 @@ enum AudioStatus {
    * Ready
    */
   READY = 'READY',
-
+  /**
+   * Error
+   */
   ERROR = 'ERROR'
 }
 class AudioManage {
@@ -285,41 +277,6 @@ class AudioManage {
     if (newTextList.length <= 0) {
       return
     }
-    this.statusList.forEach((status, index) => {
-      if (status === AudioStatus.ERROR) {
-        const audioElement = this.audioList[index]
-        if (audioElement instanceof HTMLAudioElement) {
-          const text = this.textList[index]
-          applicationApi
-            .postTextToSpeech(
-              (props.applicationId as string) || (id as string),
-              { text: text },
-              loading
-            )
-            .then(async (res: any) => {
-              if (res.type === 'application/json') {
-                const text = await res.text()
-                MsgError(text)
-                this.statusList[index] = AudioStatus.ERROR
-                return
-              }
-              // Assume the response is the byte array of an MP3 file
-              // Create a Blob object
-              const blob = new Blob([res], { type: 'audio/mp3' })
-
-              // Create an object URL
-              const url = URL.createObjectURL(blob)
-              audioElement.src = url
-              this.statusList[index] = AudioStatus.READY
-              this.play()
-            })
-            .catch((err) => {
-              console.log('err: ', err)
-              this.statusList[index] = AudioStatus.ERROR
-            })
-        }
-      }
-    })
     newTextList.forEach((text, index) => {
       this.textList.push(text)
       this.statusList.push(AudioStatus.MOUNTED)
@@ -333,9 +290,11 @@ class AudioManage {
        */
       audioElement.onended = () => {
         this.statusList[index] = AudioStatus.END
+        // If all segments have finished playing
         if (this.statusList.every((item) => item === AudioStatus.END)) {
           this.statusList = this.statusList.map((item) => AudioStatus.READY)
         } else {
+          // next
           this.play()
         }
       }
@@ -380,58 +339,130 @@ class AudioManage {
         const speechSynthesisUtterance: SpeechSynthesisUtterance = new SpeechSynthesisUtterance(
           text
         )
+        speechSynthesisUtterance.onpause = () => {
+          console.log('onpause')
+        }
         speechSynthesisUtterance.onend = () => {
           this.statusList[index] = AudioStatus.END
+          // If all segments have finished playing
+          if (this.statusList.every((item) => item === AudioStatus.END)) {
+            this.statusList = this.statusList.map((item) => AudioStatus.READY)
+          } else {
+            // next
+            this.play()
+          }
         }
+        speechSynthesisUtterance.onerror = (e) => {
+          this.statusList[index] = AudioStatus.READY
+        }
+
         this.statusList[index] = AudioStatus.READY
         this.audioList.push(speechSynthesisUtterance)
+        this.play()
+      }
+    })
+  }
+  reTryError() {
+    this.statusList.forEach((status, index) => {
+      if (status === AudioStatus.ERROR) {
+        const audioElement = this.audioList[index]
+        if (audioElement instanceof HTMLAudioElement) {
+          const text = this.textList[index]
+          applicationApi
+            .postTextToSpeech(
+              (props.applicationId as string) || (id as string),
+              { text: text },
+              loading
+            )
+            .then(async (res: any) => {
+              if (res.type === 'application/json') {
+                const text = await res.text()
+                MsgError(text)
+                this.statusList[index] = AudioStatus.ERROR
+                return
+              }
+              // Assume the response is the byte array of an MP3 file
+              // Create a Blob object
+              const blob = new Blob([res], { type: 'audio/mp3' })
+
+              // Create an object URL
+              const url = URL.createObjectURL(blob)
+              audioElement.src = url
+              this.statusList[index] = AudioStatus.READY
+              this.play()
+            })
+            .catch((err) => {
+              console.log('err: ', err)
+              this.statusList[index] = AudioStatus.ERROR
+            })
+        }
       }
     })
   }
+  isPlaying() {
+    return this.statusList.some((item) => [AudioStatus.PLAY_INT].includes(item))
+  }
   play(text?: string, is_end?: boolean) {
     if (text) {
       const textList = this.getTextList(text, is_end ? true : false)
       this.appendTextList(textList)
     }
-
     // If a segment is already being read, return immediately
-    if (this.statusList.some((item) => [AudioStatus.PAUSE, AudioStatus.PLAY_INT].includes(item))) {
+    if (this.statusList.some((item) => [AudioStatus.PLAY_INT].includes(item))) {
       return
     }
+    this.reTryError()
+
     // The content that needs to be played
-    const index = this.statusList.findIndex((status) =>
-      [AudioStatus.READY, AudioStatus.MOUNTED].includes(status)
-    )
+    const index = this.statusList.findIndex((status) => [AudioStatus.READY].includes(status))

     if (index < 0 || this.statusList[index] === AudioStatus.MOUNTED) {
       return
     }
-    console.log(index, this.audioList, this.statusList)
+
     const audioElement = this.audioList[index]
     if (audioElement instanceof SpeechSynthesisUtterance) {
-      this.statusList[index] = AudioStatus.PLAY_INT
-      // Use the browser's speech synthesis
-      window.speechSynthesis.speak(audioElement)
+      if (window.speechSynthesis.paused) {
+        window.speechSynthesis.resume()
+      } else {
+        if (window.speechSynthesis.pending) {
+          window.speechSynthesis.cancel()
+        }
+        speechSynthesis.speak(audioElement)
+        this.statusList[index] = AudioStatus.PLAY_INT
+      }
     } else {
       // Playback via the <audio> element
-      this.statusList[index] = AudioStatus.PLAY_INT
-      audioElement.play()
+      try {
+        audioElement.play()
+        this.statusList[index] = AudioStatus.PLAY_INT
+      } catch (e) {
+        this.statusList[index] = AudioStatus.ERROR
+      }
     }
   }
-  pause() {
+  pause(self?: boolean) {
     const index = this.statusList.findIndex((status) => status === AudioStatus.PLAY_INT)
     if (index < 0) {
       return
     }
     const audioElement = this.audioList[index]
     if (audioElement instanceof SpeechSynthesisUtterance) {
-      this.statusList[index] = AudioStatus.PAUSE
-      // Use the browser's speech synthesis
-      window.speechSynthesis.pause()
+      this.statusList[index] = AudioStatus.READY
+      if (self) {
+        window.speechSynthesis.pause()
+        nextTick(() => {
+          if (!window.speechSynthesis.paused) {
+            window.speechSynthesis.cancel()
+          }
+        })
+      } else {
+        window.speechSynthesis.cancel()
+      }
     } else {
       if (this.statusList[index] === AudioStatus.PLAY_INT) {
         // Playback via the <audio> element
-        this.statusList[index] = AudioStatus.PAUSE
+        this.statusList[index] = AudioStatus.READY
         audioElement.pause()
       }
     }
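
A side note on the reworked pause(self?: boolean): window.speechSynthesis.pause() is unreliable in some engines (a paused queue may never resume, notably on Android), which is presumably why a user-initiated pause above falls back to cancel() on the next tick when the engine never reports paused. A minimal standalone sketch of that fallback, with queueMicrotask standing in for Vue's nextTick; the function name pauseSpeech is illustrative, not from the codebase:

// Sketch of the pause-with-fallback pattern used by pause(self?: boolean) above.
function pauseSpeech(userInitiated: boolean): void {
  if (userInitiated) {
    window.speechSynthesis.pause()
    queueMicrotask(() => {
      // Some engines never actually enter the paused state; cancel instead,
      // so the play/pause toggle in the template cannot get stuck on "playing".
      if (!window.speechSynthesis.paused) {
        window.speechSynthesis.cancel()
      }
    })
  } else {
    // Programmatic pause (e.g. another answer starts reading): cancel outright.
    window.speechSynthesis.cancel()
  }
}
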
@@ -470,9 +501,9 @@ onMounted(() => {
     const record_id = data.record_id
     bus.emit('play:pause', record_id)
     if (props.data.record_id == record_id) {
-      if (props.tts) {
+      if (props.tts && props.tts_autoplay) {
         if (audioManage.value) {
-          audioManage.value.play(props.data.answer_text)
+          audioManage.value.play(props.data.answer_text, data.is_end)
         }
       }
     }
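
Taken together, the script changes collapse each TTS segment onto a smaller state machine: PAUSE and WAIT are removed, pausing or an utterance error drops the segment back to READY, failed HTTP segments are re-requested by reTryError(), and the template toggles its play/pause button off audioManage?.isPlaying() rather than the old audioPlayerStatus ref. A rough sketch of that lifecycle, a simplification for illustration rather than the class's actual code:

// Per-segment lifecycle implied by the diff:
// MOUNTED -> READY -> PLAY_INT -> END, with ERROR segments retried back to READY.
enum SegmentStatus {
  MOUNTED = 'MOUNTED',   // text pushed, audio/utterance not prepared yet
  READY = 'READY',       // prepared and waiting its turn (also the "paused" state)
  PLAY_INT = 'PLAY_INT', // currently playing
  END = 'END',           // finished; once every segment is END they all reset to READY
  ERROR = 'ERROR'        // TTS request failed; reTryError() re-requests the audio
}

// play() only ever picks the first READY segment to start next...
function nextPlayable(statusList: SegmentStatus[]): number {
  return statusList.findIndex((status) => status === SegmentStatus.READY)
}

// ...and the template shows the pause button while any segment is PLAY_INT.
function isPlaying(statusList: SegmentStatus[]): boolean {
  return statusList.some((status) => status === SegmentStatus.PLAY_INT)
}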