Skip to content

Commit e4d235b

Browse files
committed
Add comprehensive test suite for OpenAI audio and chat response parsing and provider implementation
- Add test module for OpenAISpeechRequest serialization: cover required fields, optional field skipping, response_format/speed/instructions handling, all TTS models (tts-1/tts-1-hd/gpt-4o-mini-tts), all voices (alloy/ash/ballad/coral/echo/fable/onyx/nova/sage/shimmer), all response formats (mp3/opus/aac/flac/wav/pcm), speed range (0.25-4.0), unicode input, and long input - Add test module for OpenAI
1 parent d875050 commit e4d235b

File tree

10 files changed

+2047
-13
lines changed

10 files changed

+2047
-13
lines changed
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
//! Embedding example using Ollama.
2+
//!
3+
//! ```bash
4+
//! ollama pull nomic-embed-text
5+
//! cargo run --example llm_ollama_embedding
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use machi::embedding::{EmbeddingProvider, EmbeddingRequest};
11+
use machi::prelude::*;
12+
13+
#[tokio::main]
14+
async fn main() -> Result<()> {
15+
let client = Ollama::with_defaults()?;
16+
17+
// Single text embedding
18+
let request = EmbeddingRequest::new("nomic-embed-text", vec!["Hello, world!".to_owned()]);
19+
20+
let response = client.embed(&request).await?;
21+
println!("Single embedding:");
22+
println!(" Dimension: {}", response.embeddings[0].vector.len());
23+
println!(
24+
" First 5 values: {:?}",
25+
&response.embeddings[0].vector[..5]
26+
);
27+
28+
// Batch embeddings for similarity comparison
29+
let texts = vec![
30+
"The cat sat on the mat.".to_owned(),
31+
"A feline rested on the rug.".to_owned(),
32+
"The stock market crashed today.".to_owned(),
33+
];
34+
35+
let request = EmbeddingRequest::new("nomic-embed-text", texts);
36+
let response = client.embed(&request).await?;
37+
38+
println!("\nBatch embeddings ({} texts):", response.embeddings.len());
39+
40+
// Calculate cosine similarity between embeddings
41+
let sim_0_1 = cosine_similarity(
42+
&response.embeddings[0].vector,
43+
&response.embeddings[1].vector,
44+
);
45+
let sim_0_2 = cosine_similarity(
46+
&response.embeddings[0].vector,
47+
&response.embeddings[2].vector,
48+
);
49+
50+
println!(" Similarity (cat/feline sentences): {sim_0_1:.4}");
51+
println!(" Similarity (cat/stock sentences): {sim_0_2:.4}");
52+
53+
if let Some(tokens) = response.total_tokens {
54+
println!("\nTotal tokens used: {tokens}");
55+
}
56+
57+
Ok(())
58+
}
59+
60+
/// Calculate cosine similarity between two vectors.
///
/// Returns a value in `[-1, 1]`. When either vector has zero magnitude
/// (including empty slices), returns `0.0` instead of the `NaN` that a
/// plain `0.0 / 0.0` division would produce.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    let denom = norm_a * norm_b;
    // Guard the degenerate case so callers never see NaN.
    if denom == 0.0 { 0.0 } else { dot / denom }
}

machi/examples/llm_openai_chat.rs

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
//! Basic chat example using OpenAI.
2+
//!
3+
//! ```bash
4+
//! export OPENAI_API_KEY=sk-...
5+
//! cargo run --example llm_openai_chat
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use machi::prelude::*;
11+
12+
#[tokio::main]
13+
async fn main() -> Result<()> {
14+
let client = OpenAI::from_env()?;
15+
16+
let request = ChatRequest::new("gpt-4o-mini")
17+
.system("You are a helpful assistant.")
18+
.user("What is the capital of France?");
19+
20+
let response = client.chat(&request).await?;
21+
println!("{}", response.text().unwrap_or_default());
22+
23+
Ok(())
24+
}
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
//! Embedding example using OpenAI.
2+
//!
3+
//! ```bash
4+
//! export OPENAI_API_KEY=sk-...
5+
//! cargo run --example llm_openai_embedding
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use machi::embedding::{EmbeddingProvider, EmbeddingRequest};
11+
use machi::prelude::*;
12+
13+
#[tokio::main]
14+
async fn main() -> Result<()> {
15+
let client = OpenAI::from_env()?;
16+
17+
// Single text embedding
18+
let request = EmbeddingRequest::new("text-embedding-3-small", vec!["Hello, world!".to_owned()]);
19+
20+
let response = client.embed(&request).await?;
21+
println!("Single embedding:");
22+
println!(" Dimension: {}", response.embeddings[0].vector.len());
23+
println!(
24+
" First 5 values: {:?}",
25+
&response.embeddings[0].vector[..5]
26+
);
27+
28+
// Batch embeddings for similarity comparison
29+
let texts = vec![
30+
"The cat sat on the mat.".to_owned(),
31+
"A feline rested on the rug.".to_owned(),
32+
"The stock market crashed today.".to_owned(),
33+
];
34+
35+
let request = EmbeddingRequest::new("text-embedding-3-small", texts);
36+
let response = client.embed(&request).await?;
37+
38+
println!("\nBatch embeddings ({} texts):", response.embeddings.len());
39+
40+
// Calculate cosine similarity between embeddings
41+
let sim_0_1 = cosine_similarity(
42+
&response.embeddings[0].vector,
43+
&response.embeddings[1].vector,
44+
);
45+
let sim_0_2 = cosine_similarity(
46+
&response.embeddings[0].vector,
47+
&response.embeddings[2].vector,
48+
);
49+
50+
println!(" Similarity (cat/feline sentences): {sim_0_1:.4}");
51+
println!(" Similarity (cat/stock sentences): {sim_0_2:.4}");
52+
53+
if let Some(usage) = &response.usage {
54+
println!("\nTokens used: {}", usage.total_tokens);
55+
}
56+
57+
Ok(())
58+
}
59+
60+
/// Calculate cosine similarity between two vectors.
///
/// Returns a value in `[-1, 1]`. When either vector has zero magnitude
/// (including empty slices), returns `0.0` instead of the `NaN` that a
/// plain `0.0 / 0.0` division would produce.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    let denom = norm_a * norm_b;
    // Guard the degenerate case so callers never see NaN.
    if denom == 0.0 { 0.0 } else { dot / denom }
}
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
//! Streaming chat example using OpenAI.
2+
//!
3+
//! ```bash
4+
//! export OPENAI_API_KEY=sk-...
5+
//! cargo run --example llm_openai_stream
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use futures::StreamExt;
11+
use machi::prelude::*;
12+
use std::io::{Write, stdout};
13+
14+
#[tokio::main]
15+
async fn main() -> Result<()> {
16+
let client = OpenAI::from_env()?;
17+
18+
let request = ChatRequest::new("gpt-4o-mini").user("Write a haiku about Rust.");
19+
20+
let mut stream = client.chat_stream(&request).await?;
21+
22+
while let Some(chunk) = stream.next().await {
23+
if let StreamChunk::Text(text) = chunk? {
24+
print!("{text}");
25+
stdout().flush()?;
26+
}
27+
}
28+
println!();
29+
30+
Ok(())
31+
}

machi/examples/llm_openai_stt.rs

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
//! Speech-to-Text example using OpenAI.
2+
//!
3+
//! ```bash
4+
//! export OPENAI_API_KEY=sk-...
5+
//! cargo run --example llm_openai_stt
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use machi::audio::{AudioFormat, SpeechToTextProvider, TranscriptionRequest};
11+
use machi::prelude::*;
12+
use std::fs;
13+
14+
#[tokio::main]
15+
async fn main() -> Result<()> {
16+
let client = OpenAI::from_env()?;
17+
18+
// Read audio file
19+
let audio_data = fs::read("machi/examples/data/en-us-natural-speech.mp3")?;
20+
21+
// Transcribe
22+
let request = TranscriptionRequest::new("whisper-1", audio_data).format(AudioFormat::Mp3);
23+
let response = client.transcribe(&request).await?;
24+
25+
println!("\nTranscription:");
26+
println!("{}", response.text);
27+
28+
if let Some(lang) = &response.language {
29+
println!("\nDetected language: {lang}");
30+
}
31+
if let Some(duration) = response.duration {
32+
println!("Duration: {duration:.2}s");
33+
}
34+
35+
Ok(())
36+
}

machi/examples/llm_openai_tts.rs

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
//! Text-to-Speech example using OpenAI.
2+
//!
3+
//! ```bash
4+
//! export OPENAI_API_KEY=sk-...
5+
//! cargo run --example llm_openai_tts
6+
//! ```
7+
8+
#![allow(clippy::print_stdout)]
9+
10+
use machi::audio::{SpeechRequest, TextToSpeechProvider};
11+
use machi::prelude::*;
12+
use std::fs;
13+
14+
#[tokio::main]
15+
async fn main() -> Result<()> {
16+
let client = OpenAI::from_env()?;
17+
18+
let request = SpeechRequest::new(
19+
"tts-1",
20+
"Hello! This is a test of OpenAI's text-to-speech API.",
21+
"nova",
22+
);
23+
24+
println!("\nGenerating speech...");
25+
let response = client.speech(&request).await?;
26+
27+
let output_path = "output.mp3";
28+
fs::write(output_path, &response.audio)?;
29+
println!(
30+
"Audio saved to: {output_path} ({} bytes)",
31+
response.audio.len()
32+
);
33+
34+
Ok(())
35+
}

0 commit comments

Comments
 (0)