@@ -2,12 +2,7 @@
 import OpenAI from 'openai';
 import {
   Agent,
-  VoicePipeline,
-  VoicePipelineConfig,
-  OpenAITTSModel,
-  OpenAISTTModel,
-  SingleAgentVoiceWorkflow,
-  tracing
+  Voice
 } from '../../src/index';
 import { serve } from 'bun';
 import { join } from 'path';
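
This hunk collapses the individual voice exports into a single `Voice` namespace. A minimal sketch of how the namespaced API reads after the change, assuming the same exports shown in this diff:

```ts
import OpenAI from 'openai';
import { Agent, Voice } from '../../src/index';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Every voice primitive now hangs off the Voice namespace:
const sttModel = new Voice.OpenAISTTModel(client);
const ttsModel = new Voice.OpenAITTSModel(client);
```
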
@@ -43,90 +38,57 @@ class WorkflowCallbacks { |
 }

 // Setting up the voice workflow
-const workflow = new SingleAgentVoiceWorkflow(agent, {
+const workflow = new Voice.SingleAgentVoiceWorkflow(agent, {
   callbacks: new WorkflowCallbacks()
 });

-// Setting up the tracing processor
-const tracingProcessor = new tracing.OpenAITracingProcessor();
-tracing.addTraceProcessor(tracingProcessor);
-
 // Creating the STT and TTS models
-const sttModel = new OpenAISTTModel(client);
-const ttsModel = new OpenAITTSModel(client);
+const sttModel = new Voice.OpenAISTTModel(client);
+const ttsModel = new Voice.OpenAITTSModel(client);

 // Configuring the pipeline
-const config = new VoicePipelineConfig({
+const config = new Voice.VoicePipelineConfig({
   workflowName: "Exemplo de Voz",
-  tracingDisabled: false,
   sttSettings: {
     voice: 'coral'
   },
   ttsSettings: {
     voice: 'coral'
-  },
-  traceMetadata: {
-    environment: "development",
-    application: "voice-demo"
   }
 });


-const pipeline = new VoicePipeline({
+const pipeline = new Voice.VoicePipeline({
   workflow,
   sttModel,
   ttsModel,
   config
 });

-// Wrap the main operations in traces
 async function handleAudioRequest(audio) {
-
   console.log(`[debug] handleAudioRequest called with audio: ${audio}`);
   const size = audio.size;
   const type = audio.type;
   console.log(`[debug] handleAudioRequest called with audio: ${size}, ${type}`);
-
-  return await tracing.withTrace("voice_interaction", async (trace) => {
-    const spanData = new tracing.CustomSpanData("process_audio", {
-      id: "voice_stream_event_" + Date.now().toString(),
-      type: "voice_stream_event_audio",
-      input_size: size,
-      content_type: type
-    });
-    return await tracing.withSpan("process_audio", async (span) => {
-      // Process the audio with the pipeline
-      const result = await pipeline.processAudio(audio);
-      return result;
-    }, spanData);
-  }, {
-    metadata: {
-      environment: "development",
-      application: "voice-demo"
-    }
-  });
+  // Process the audio with the pipeline directly
+  const result = await pipeline.processAudio(audio);
+  return result;
 }

 // Function to process audio
 async function processAudio(audioBlob) {
-  const spanData = new tracing.CustomSpanData("audio_processing", {
-    type: "voice_stream_event_lifecycle",
-    event: "turn_started"
-  });
-  return await tracing.withSpan("audio_processing", async (span) => {
-    // Ensure the blob is in the correct format
-    if (!(audioBlob instanceof Blob)) {
-      throw new Error('Input must be a Blob');
-    }
+  // Ensure the blob is in the correct format
+  if (!(audioBlob instanceof Blob)) {
+    throw new Error('Input must be a Blob');
+  }

-    const audioChunks = [];
-    for await (const chunk of await handleAudioRequest(audioBlob)) {
-      audioChunks.push(chunk);
-    }
+  const audioChunks = [];
+  for await (const chunk of await handleAudioRequest(audioBlob)) {
+    audioChunks.push(chunk);
+  }

-    // Keep the webm format as it's supported by OpenAI
-    return new Blob(audioChunks, { type: 'audio/webm' });
-  }, spanData);
+  // Keep the webm format as it's supported by OpenAI
+  return new Blob(audioChunks, { type: 'audio/webm' });
 }

 // Bun server
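
With the manual `withTrace`/`withSpan` wrappers gone, the audio path is a straight call into the pipeline. A condensed sketch of the resulting flow, assuming (as the `for await` above implies) that `pipeline.processAudio` resolves to an async iterable of audio chunks; `replyAsWebm` is a hypothetical name:

```ts
async function replyAsWebm(input: Blob): Promise<Blob> {
  const chunks: BlobPart[] = [];
  // Collect the streamed response chunks from the pipeline
  for await (const chunk of await pipeline.processAudio(input)) {
    chunks.push(chunk);
  }
  // webm is kept because OpenAI accepts it directly
  return new Blob(chunks, { type: 'audio/webm' });
}
```
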
@@ -176,8 +138,7 @@ const server = serve({ |
       headers: { 'Content-Type': 'text/plain' }
     });
   } finally {
-    // Ensure the tracing processor is shut down correctly
-    tracingProcessor.shutdown();
+    // No tracing action needed
   }
 }
 });
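
From the client's point of view nothing changes: the server still takes recorded audio and returns synthesized audio. A hypothetical browser-side caller, assuming the Bun server accepts POSTed `audio/webm` on a `/process` route (the route handlers sit outside these hunks, so both names are assumptions):

```ts
async function sendRecording(recorded: Blob): Promise<Blob> {
  // POST the recorded audio to the (assumed) processing endpoint
  const res = await fetch('/process', {
    method: 'POST',
    headers: { 'Content-Type': 'audio/webm' },
    body: recorded,
  });
  if (!res.ok) throw new Error(`Upload failed: ${res.status}`);
  return await res.blob(); // the synthesized reply audio
}
```
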