180 changes: 180 additions & 0 deletions packages/tasks/src/tasks/audio-text-to-text/about.md
@@ -0,0 +1,180 @@
## Use Cases

> This task takes `audio` and a `text prompt` and returns `text` (answers, summaries, structured notes, etc.).

### Audio question answering
Ask targeted questions about lectures, podcasts, or calls and get precise, context-aware answers.
**Example:** Audio: physics lecture → Prompt: “What did the teacher say about gravity and how is it measured?”

### Meeting notes & action items
Turn multi-speaker meetings into concise minutes with decisions, owners, and deadlines.
**Example:** Audio: weekly stand-up → Prompt: “Summarize key decisions and list action items with assignees.”

### Speech understanding & intent
Go beyond transcription to extract intent, sentiment, uncertainty, or emotion from spoken language.
**Example:** “I’m not sure I can finish this on time.” → Prompt: “Describe speaker intent and confidence.”

### Music & sound analysis (textual)
Describe instrumentation, genre, tempo, or sections, and suggest edits or techniques (text output only).
**Example:** Song demo → Prompt: “Identify key and tempo, then suggest jazz reharmonization ideas for the chorus.”

## Inference
You can pass an audio file together with a text instruction to any `audio-text-to-text` model using the `transformers` library and get a text response. The following code examples show how.

### Summarization / Q&A on a Single Audio
Run queries or request summaries directly from an audio clip.

```python
from transformers import VoxtralForConditionalGeneration, AutoProcessor
import torch

device = "cuda"
repo_id = "mistralai/Voxtral-Mini-3B-2507"

# Load the processor (audio feature extraction + chat templating) and the model
processor = AutoProcessor.from_pretrained(repo_id)
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)

# A single user turn combining an audio file with a text instruction
conversation = [
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/winning_call.mp3",
            },
            {"type": "text", "text": "Summarize this audio"},
        ],
    }
]

# The processor fetches the audio, extracts features, and builds the prompt
inputs = processor.apply_chat_template(conversation)
inputs = inputs.to(device, dtype=torch.bfloat16)

outputs = model.generate(**inputs, max_new_tokens=500)
# Decode only the newly generated tokens, skipping the prompt
decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)

print("\nGenerated response:")
print("=" * 80)
print(decoded_outputs[0])
print("=" * 80)
```

### Multiple Audio Querying
Pass multiple audio inputs in the same request and ask questions that compare or reference them.

```python
from transformers import VoxtralForConditionalGeneration, AutoProcessor
import torch

device = "cuda"
repo_id = "mistralai/Voxtral-Mini-3B-2507"

processor = AutoProcessor.from_pretrained(repo_id)
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)

conversation = [
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/mary_had_lamb.mp3",
            },
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/winning_call.mp3",
            },
            {"type": "text", "text": "What sport and what nursery rhyme are referenced?"},
        ],
    }
]

inputs = processor.apply_chat_template(conversation)
inputs = inputs.to(device, dtype=torch.bfloat16)

outputs = model.generate(**inputs, max_new_tokens=500)
decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)

print("\nGenerated response:")
print("=" * 80)
print(decoded_outputs[0])
print("=" * 80)
```

### Multi-Turn Conversation with Audio
Mix audio and text across multiple turns; the model keeps the context of earlier turns in the dialogue.

```python
from transformers import VoxtralForConditionalGeneration, AutoProcessor
import torch

device = "cuda"
repo_id = "mistralai/Voxtral-Mini-3B-2507"

processor = AutoProcessor.from_pretrained(repo_id)
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)

conversation = [
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3",
            },
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/bcn_weather.mp3",
            },
            {"type": "text", "text": "Describe briefly what you can hear."},
        ],
    },
    {
        "role": "assistant",
        "content": "The audio begins with the speaker delivering a farewell address in Chicago, reflecting on his eight years as president and expressing gratitude to the American people. The audio then transitions to a weather report, stating that it was 35 degrees in Barcelona the previous day, but the temperature would drop to minus 20 degrees the following day.",
    },
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "path": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/winning_call.mp3",
            },
            {"type": "text", "text": "Ok, now compare this new audio with the previous one."},
        ],
    },
]

inputs = processor.apply_chat_template(conversation)
inputs = inputs.to(device, dtype=torch.bfloat16)

outputs = model.generate(**inputs, max_new_tokens=500)
decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)

print("\nGenerated response:")
print("=" * 80)
print(decoded_outputs[0])
print("=" * 80)
```
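
### Inference with Other Models

Other audio-text-to-text models follow the same chat-template pattern, but preprocessing details vary from model to model. As a rough sketch, the snippet below follows the usage shown on the Qwen2-Audio-7B-Instruct model card; the prompt and audio URL are placeholders of our own, and the `audios` keyword argument is specific to this processor (newer `transformers` releases may name it differently), so check the model card for the exact interface.

```python
from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor

repo_id = "Qwen/Qwen2-Audio-7B-Instruct"
processor = AutoProcessor.from_pretrained(repo_id)
model = Qwen2AudioForConditionalGeneration.from_pretrained(repo_id, device_map="auto")

conversation = [
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "audio_url": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/winning_call.mp3",
            },
            {"type": "text", "text": "What is happening in this clip?"},
        ],
    },
]

# Unlike the Voxtral processor above, this one takes the templated text and
# the raw waveforms separately, so load each referenced audio file by hand.
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
audios = [
    librosa.load(
        BytesIO(urlopen(item["audio_url"]).read()),
        sr=processor.feature_extractor.sampling_rate,
    )[0]
    for message in conversation
    for item in message["content"]
    if item["type"] == "audio"
]

inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt
response = processor.batch_decode(outputs[:, inputs.input_ids.size(1):], skip_special_tokens=True)[0]
print(response)
```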

## Useful Resources

If you want to learn more about this concept, here are some useful links:

### Papers
- [SpeechGPT (Paper)](https://huggingface.co/papers/2305.11000)
- [Voxtral (Paper)](https://huggingface.co/papers/2507.13264)
- [Qwen2-Audio (Paper)](https://huggingface.co/papers/2407.10759)
- [AudioPaLM (Paper)](https://huggingface.co/papers/2306.12925)

### Blogs
- [Qwen2-Audio (Blog)](https://qwenlm.github.io/blog/qwen2-audio/)

### Datasets
- [nvidia/AF-Think](https://huggingface.co/datasets/nvidia/AF-Think)
- [nvidia/AudioSkills](https://huggingface.co/datasets/nvidia/AudioSkills)

### Code & Demos
- [Qwen2-Audio](https://github.com/QwenLM/Qwen2-Audio)
- [SpeechGPT](https://github.com/0nutation/SpeechGPT)
- [AudioPaLM (Demo Page)](https://google-research.github.io/seanet/audiopalm/examples/)
70 changes: 70 additions & 0 deletions packages/tasks/src/tasks/audio-text-to-text/data.ts
@@ -0,0 +1,70 @@
import type { TaskDataCustom } from "../index.js";

const taskData: TaskDataCustom = {
  datasets: [
    {
      description: "A dataset containing audio conversations with question–answer pairs.",
      id: "nvidia/AF-Think",
    },
    {
      description: "A more advanced dataset that also includes natural-language descriptions of audio characteristics.",
      id: "tsinghua-ee/QualiSpeech",
    },
  ],
  demo: {
    inputs: [
      {
        filename: "audio.wav",
        type: "audio",
      },
      {
        label: "Text Prompt",
        content: "What is the gender of the speaker?",
        type: "text",
      },
    ],
    outputs: [
      {
        label: "Generated Text",
        content: "The gender of the speaker is female.",
        type: "text",
      },
    ],
  },
  metrics: [],
  models: [
    {
      description: "A lightweight model that takes both audio and text as input and generates responses.",
      id: "fixie-ai/ultravox-v0_5-llama-3_2-1b",
    },
    {
      description: "A multimodal model that supports voice chat and audio analysis.",
      id: "Qwen/Qwen2-Audio-7B-Instruct",
    },
    {
      description: "A model for audio understanding, speech translation, and transcription.",
      id: "mistralai/Voxtral-Small-24B-2507",
    },
    {
      description: "A model capable of audio question answering and reasoning.",
      id: "nvidia/audio-flamingo-3",
    },
  ],
  spaces: [
    {
      description: "A Space that takes both audio and text as input and generates answers.",
      id: "iamomtiwari/ATTT",
    },
    {
      description: "A web application that demonstrates chatting with the Qwen2-Audio model.",
      id: "freddyaboulton/talk-to-qwen-webrtc",
    },
  ],
  summary: "Audio-text-to-text models take both an audio clip and a text prompt as input, and generate natural language text as output. These models can answer questions about spoken content, summarize meetings, analyze music, or interpret speech beyond simple transcription. They are useful for applications that combine speech understanding with reasoning or conversation.",
  widgetModels: [],
  youtubeId: "",
};

export default taskData;