Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
91 changes: 91 additions & 0 deletions src/components/models/code/DeepgramFlux.astro
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
---
// Usage walkthrough (Worker + deploy + client script) rendered on the
// Deepgram Flux model page. Flux is WebSocket-only, so the example shows a
// Worker that upgrades the connection and a browser client that streams audio.
import { z } from "astro:schema";
import { Code } from "@astrojs/starlight/components";
import Details from "~/components/Details.astro";

type Props = z.infer<typeof props>;

const props = z.object({
	// Model ID (e.g. "@cf/deepgram/flux"), interpolated into the Worker snippet.
	name: z.string(),
	// Accepted for parity with sibling code-example components but unused here;
	// optional so callers that don't pass it do not fail schema validation.
	lora: z.boolean().optional(),
});

const { name } = props.parse(Astro.props);

const worker = `
export default {
	async fetch(request, env, ctx): Promise<Response> {
		const resp = await env.AI.run("${name}", {
			encoding: "linear16",
			sample_rate: "16000"
		}, {
			websocket: true
		});
		return resp;
	},
} satisfies ExportedHandler<Env>;
`;

const deployWorker = `
npx wrangler deploy
`;

const clientScript = `
const ws = new WebSocket('wss://<your-worker-url.com>');

ws.onopen = () => {
	console.log('Connected to WebSocket');

	// Generate and send random audio bytes
	// You can replace this part with a function
	// that reads from your mic or other audio source
	const audioData = generateRandomAudio();
	ws.send(audioData);
	console.log('Audio data sent');
};

ws.onmessage = (event) => {
	// Transcription will be received here
	// Add your custom logic to parse the data
	console.log('Received:', event.data);
};

ws.onerror = (error) => {
	console.error('WebSocket error:', error);
};

ws.onclose = () => {
	console.log('WebSocket closed');
};

// Generate random audio data (1 second of noise at 16kHz, mono,
// matching the sample_rate configured in the Worker)
function generateRandomAudio() {
	const sampleRate = 16000;
	const duration = 1;
	const numSamples = sampleRate * duration;
	const buffer = new ArrayBuffer(numSamples * 2);
	const view = new Int16Array(buffer);

	for (let i = 0; i < numSamples; i++) {
		view[i] = Math.floor(Math.random() * 65536 - 32768);
	}

	return buffer;
}
`;

---

<>
	<Details header="Step 1: Create a Worker that establishes a WebSocket connection">
		<Code code={worker} lang="ts" />
	</Details>

	<Details header="Step 2: Deploy your Worker">
		<Code code={deployWorker} lang="sh" />
	</Details>

	<Details header="Step 3: Write a client script to connect to your Worker and send audio">
		<Code code={clientScript} lang="js" />
	</Details>
</>
83 changes: 83 additions & 0 deletions src/content/changelog/workers-ai/2025-10-02-deepgram-flux.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
---
title: New Deepgram Flux model available on Workers AI
description: Partner voice activity detection model
date: 2025-10-02
---

Deepgram's newest Flux model [`@cf/deepgram/flux`](/workers-ai/models/flux/) is now available on Workers AI, hosted directly on Cloudflare's infrastructure. We're excited to be a launch partner with Deepgram and offer their new Speech Recognition model built specifically for enabling voice agents. Check out [Deepgram's blog](https://deepgram.com/flux) for more details on the release.

The Flux model can be used in conjunction with Deepgram's speech-to-text model [`@cf/deepgram/nova-3`](/workers-ai/models/nova-3/) and text-to-speech model [`@cf/deepgram/aura-1`](/workers-ai/models/aura-1/) to build end-to-end voice agents. Having Deepgram on Workers AI takes advantage of our edge GPU infrastructure, enabling ultra-low-latency voice AI applications.

## Promotional Pricing
For the month of October 2025, Deepgram's Flux model will be free to use on Workers AI. Official pricing will be announced soon and will take effect when the promotional period ends on October 31, 2025. Check out the [model page](/workers-ai/models/flux/) for pricing details in the future.


## Example Usage

The new Flux model is WebSocket only as it requires live bi-directional streaming in order to recognize speech activity.

1. Create a Worker that establishes a WebSocket connection with `@cf/deepgram/flux`

```ts
export default {
async fetch(request, env, ctx): Promise<Response> {
const resp = await env.AI.run("@cf/deepgram/flux", {
encoding: "linear16",
sample_rate: "16000"
}, {
websocket: true
});
return resp;
},
} satisfies ExportedHandler<Env>;
```

2. Deploy your Worker
```bash
npx wrangler deploy
```

3. Write a client script to connect to your Worker and start sending random audio bytes to it
```js
const ws = new WebSocket('wss://<your-worker-url.com>');

ws.onopen = () => {
console.log('Connected to WebSocket');

// Generate and send random audio bytes
// You can replace this part with a function
// that reads from your mic or other audio source
const audioData = generateRandomAudio();
ws.send(audioData);
console.log('Audio data sent');
};

ws.onmessage = (event) => {
// Transcription will be received here
// Add your custom logic to parse the data
console.log('Received:', event.data);
};

ws.onerror = (error) => {
console.error('WebSocket error:', error);
};

ws.onclose = () => {
console.log('WebSocket closed');
};

// Generate random audio data (1 second of noise at 16kHz, mono,
// matching the sample_rate configured in the Worker)
function generateRandomAudio() {
const sampleRate = 16000;
const duration = 1;
const numSamples = sampleRate * duration;
const buffer = new ArrayBuffer(numSamples * 2);
const view = new Int16Array(buffer);

for (let i = 0; i < numSamples; i++) {
view[i] = Math.floor(Math.random() * 65536 - 32768);
}

return buffer;
}
```
5 changes: 5 additions & 0 deletions src/content/release-notes/workers-ai.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@ link: "/workers-ai/changelog/"
productName: Workers AI
productLink: "/workers-ai/"
entries:
- publish_date: "2025-10-02"
title: Deepgram Flux now available on Workers AI
description: |-
- We're excited to be a launch partner with Deepgram and offer their new Speech Recognition model built specifically for enabling voice agents. Check out [Deepgram's blog](https://deepgram.com/flux) for more details on the release.
- Access the model through [`@cf/deepgram/flux`](/workers-ai/models/flux/) and check out the [changelog](/changelog/2025-10-02-deepgram-flux/) for in-depth examples.
- publish_date: "2025-09-24"
title: New local models available on Workers AI
description: |-
Expand Down
146 changes: 146 additions & 0 deletions src/content/workers-ai-models/flux.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
{
"id": "a2a2afba-b609-4325-8c41-5791ce962239",
"source": 1,
"name": "@cf/deepgram/flux",
"description": "Flux is the first conversational speech recognition model built specifically for voice agents.",
"task": {
"id": "dfce1c48-2a81-462e-a7fd-de97ce985207",
"name": "Automatic Speech Recognition",
"description": "Automatic speech recognition (ASR) models convert a speech signal, typically an audio input, to text."
},
"created_at": "2025-09-29 21:07:55.114",
"tags": [],
"properties": [
{
"property_id": "partner",
"value": "true"
},
{
"property_id": "realtime",
"value": "true"
}
],
"schema": {
"input": {
"type": "object",
"properties": {
"encoding": {
"type": "string",
"description": "Encoding of the audio stream. Currently only supports raw signed little-endian 16-bit PCM.",
"enum": [
"linear16"
]
},
"sample_rate": {
"type": "string",
"description": "Sample rate of the audio stream in Hz.",
"pattern": "^[0-9]+$"
},
"eager_eot_threshold": {
"type": "string",
"description": "End-of-turn confidence required to fire an eager end-of-turn event. When set, enables EagerEndOfTurn and TurnResumed events. Valid Values 0.3 - 0.9."
},
"eot_threshold": {
"type": "string",
"description": "End-of-turn confidence required to finish a turn. Valid Values 0.5 - 0.9.",
"default": "0.7"
},
"eot_timeout_ms": {
"type": "string",
"description": "A turn will be finished when this much time has passed after speech, regardless of EOT confidence.",
"default": "5000",
"pattern": "^[0-9]+$"
},
"keyterm": {
"type": "string",
"description": "Keyterm prompting can improve recognition of specialized terminology. Pass multiple keyterm query parameters to boost multiple keyterms."
},
"mip_opt_out": {
"type": "string",
"description": "Opts out requests from the Deepgram Model Improvement Program. Refer to Deepgram Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip",
"enum": [
"true",
"false"
],
"default": "false"
},
"tag": {
"type": "string",
"description": "Label your requests for the purpose of identification during usage reporting"
}
},
"required": [
"sample_rate",
"encoding"
]
},
"output": {
"type": "object",
"description": "Output will be returned as websocket messages.",
"properties": {
"request_id": {
"type": "string",
"description": "The unique identifier of the request (uuid)"
},
"sequence_id": {
"type": "integer",
"description": "Starts at 0 and increments for each message the server sends to the client.",
"minimum": 0
},
"event": {
"type": "string",
"description": "The type of event being reported.",
"enum": [
"Update",
"StartOfTurn",
"EagerEndOfTurn",
"TurnResumed",
"EndOfTurn"
]
},
"turn_index": {
"type": "integer",
"description": "The index of the current turn",
"minimum": 0
},
"audio_window_start": {
"type": "number",
"description": "Start time in seconds of the audio range that was transcribed"
},
"audio_window_end": {
"type": "number",
"description": "End time in seconds of the audio range that was transcribed"
},
"transcript": {
"type": "string",
"description": "Text that was said over the course of the current turn"
},
"words": {
"type": "array",
"description": "The words in the transcript",
"items": {
"type": "object",
"required": [
"word",
"confidence"
],
"properties": {
"word": {
"type": "string",
"description": "The individual punctuated, properly-cased word from the transcript"
},
"confidence": {
"type": "number",
"description": "Confidence that this word was transcribed correctly"
}
}
}
},
"end_of_turn_confidence": {
"type": "number",
"description": "Confidence that no more speech is coming in this turn"
}
}
}
}
}
15 changes: 15 additions & 0 deletions src/pages/workers-ai/models/[name].astro
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import { authorData } from "~/components/models/data";
import OpenAIResponsesTextGenerationCode from "~/components/models/code/OpenAIResponsesTextGenerationCode.astro";
import DeepgramAura from "~/components/models/code/DeepgramAura.astro";
import DeepgramNova from "~/components/models/code/DeepgramNova.astro";
import DeepgramFlux from "~/components/models/code/DeepgramFlux.astro";

export const getStaticPaths = (async () => {
const models = await getCollection("workers-ai-models");
Expand Down Expand Up @@ -126,6 +127,10 @@ if (model.name === "@cf/deepgram/nova-3") {
CodeExamples = DeepgramNova;
}

if (model.name === "@cf/deepgram/flux") {
CodeExamples = DeepgramFlux;
}

const description = model.description;

const isBeta = model.properties.find(
Expand Down Expand Up @@ -221,6 +226,16 @@ const starlightPageProps = {
)
}

{
model.name === "@cf/deepgram/flux" && (
<Aside>
<p>
For the month of October 2025, Deepgram's Flux model will be free to use on Workers AI. Official pricing will be announced soon and will take effect when the promotional period ends on October 31, 2025.
</p>
</Aside>
)
}

<ModelFeatures model={model} />
{
model.name === "@cf/deepgram/nova-3" && (
Expand Down
Loading