Commit 65d5703

Onboard to test framework (#666)
* ignore failing tests
* move to nunit 4 patterns
* upgrade nunit
* first set of tests moved to ClientTestBase (auto sync/async)
* next set to ClientTestBase
* last few test files
* move to dev feed version
* add batch/chat tests
* update
* add chat tests
* add audio tests
* containers, assistants, responses
* models and moderations
* remaining tests
* be more flexible in test environment
* changes for pipeline
* feedback
* bump test proxy and test framework versions
* update recordings
* more recordings updates
* update more recordings
* merge conflicts and new recordings
1 parent 1fa5728 commit 65d5703
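The core pattern in this commit is the move from the repo's SyncAsyncTestBase (duplicate [TestFixture(true)]/[TestFixture(false)] declarations plus IsAsync branching in every test) to Microsoft.ClientModel.TestFramework's ClientTestBase, which runs each fixture in a sync and an async variant and proxies the client so one awaited call covers both pipelines. A minimal sketch of that shape, assuming only the members visible in the diffs below; the fixture name and test body are illustrative, not part of the commit:

using System;
using System.ClientModel;
using System.Threading;
using Microsoft.ClientModel.TestFramework;
using NUnit.Framework;
using OpenAI.Audio;

namespace OpenAI.Tests.Audio;

// Hypothetical fixture illustrating the ClientTestBase pattern: the base class
// runs the fixture once with isAsync = true and once with isAsync = false, and
// CreateProxyFromClient wraps the client so that, in the sync run, awaited
// *Async calls are routed to their synchronous counterparts.
[Parallelizable(ParallelScope.All)]
[Category("Smoke")]
internal class ExampleSpeechMockTests : ClientTestBase
{
    private static readonly ApiKeyCredential s_fakeCredential = new ApiKeyCredential("key");

    public ExampleSpeechMockTests(bool isAsync) : base(isAsync)
    {
    }

    [Test]
    public void CancellationIsHonoredOnBothPipelines()
    {
        AudioClient client = CreateProxyFromClient(new AudioClient("model", s_fakeCredential));
        using CancellationTokenSource cancellationSource = new();
        cancellationSource.Cancel();

        // Written once against the async surface; the proxy decides whether the
        // sync or async pipeline actually runs for this fixture instance.
        Assert.That(
            async () => await client.GenerateSpeechAsync("text", GeneratedSpeechVoice.Echo, cancellationToken: cancellationSource.Token),
            Throws.InstanceOf<OperationCanceledException>());
    }
}

Compare this with the GenerateSpeechMockTests diff further down, which makes exactly this conversion.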

File tree: 629 files changed (+469895 / -4254 lines)

Some content is hidden: large commits have some of their content hidden by default, so only a subset of the 629 changed files is shown below.

.config/dotnet-tools.json

Lines changed: 7 additions & 0 deletions
@@ -8,6 +8,13 @@
         "snippet-generator"
       ],
       "rollForward": false
+    },
+    "azure.sdk.tools.testproxy": {
+      "version": "1.0.0-dev.20250922.2",
+      "commands": [
+        "test-proxy"
+      ],
+      "rollForward": false
     }
   }
 }

tests/Assistants/Assistants.VectorStoresTests.cs

Lines changed: 124 additions & 302 deletions
Large diffs are not rendered by default.

tests/Assistants/AssistantsSmokeTests.cs

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ public void ResponseFormatEquality()
         Assert.That((AssistantResponseFormat)null != AssistantResponseFormat.CreateTextFormat());
         Assert.That(AssistantResponseFormat.CreateTextFormat() != null);
         Assert.That(AssistantResponseFormat.CreateTextFormat(), Is.Not.EqualTo(null));
-        Assert.That(null, Is.Not.EqualTo(AssistantResponseFormat.CreateTextFormat()));
+        Assert.That(AssistantResponseFormat.CreateTextFormat(), Is.Not.Null);

         AssistantResponseFormat jsonSchemaFormat = AssistantResponseFormat.CreateJsonSchemaFormat(
             name: "test_schema",

tests/Assistants/AssistantsTests.cs

Lines changed: 183 additions & 816 deletions
Large diffs are not rendered by default.
tests/Audio/GenerateSpeechMockTests.cs

Lines changed: 4 additions & 14 deletions
@@ -1,18 +1,16 @@
 using System;
 using System.ClientModel;
 using System.Threading;
+using Microsoft.ClientModel.TestFramework;
 using NUnit.Framework;
 using OpenAI.Audio;
-using OpenAI.Tests.Utility;

 namespace OpenAI.Tests.Audio;

-[TestFixture(true)]
-[TestFixture(false)]
 [Parallelizable(ParallelScope.All)]
 [Category("Audio")]
 [Category("Smoke")]
-internal class GenerateSpeechMockTests : SyncAsyncTestBase
+internal class GenerateSpeechMockTests : ClientTestBase
 {
     private static readonly ApiKeyCredential s_fakeCredential = new ApiKeyCredential("key");

@@ -24,19 +22,11 @@ public GenerateSpeechMockTests(bool isAsync)
     [Test]
     public void GenerateSpeechRespectsTheCancellationToken()
     {
-        AudioClient client = new AudioClient("model", s_fakeCredential);
+        AudioClient client = CreateProxyFromClient(new AudioClient("model", s_fakeCredential));
         using CancellationTokenSource cancellationSource = new();
         cancellationSource.Cancel();

-        if (IsAsync)
-        {
-            Assert.That(async () => await client.GenerateSpeechAsync("text", GeneratedSpeechVoice.Echo, cancellationToken: cancellationSource.Token),
+        Assert.That(async () => await client.GenerateSpeechAsync("text", GeneratedSpeechVoice.Echo, cancellationToken: cancellationSource.Token),
             Throws.InstanceOf<OperationCanceledException>());
-        }
-        else
-        {
-            Assert.That(() => client.GenerateSpeech("text", GeneratedSpeechVoice.Echo, cancellationToken: cancellationSource.Token),
-                Throws.InstanceOf<OperationCanceledException>());
-        }
     }
 }

tests/Audio/GenerateSpeechTests.cs

Lines changed: 11 additions & 17 deletions
@@ -1,4 +1,5 @@
-using NUnit.Framework;
+using Microsoft.ClientModel.TestFramework;
+using NUnit.Framework;
 using OpenAI.Audio;
 using OpenAI.Tests.Utility;
 using System;
@@ -8,11 +9,8 @@

 namespace OpenAI.Tests.Audio;

-[TestFixture(true)]
-[TestFixture(false)]
-[Parallelizable(ParallelScope.All)]
 [Category("Audio")]
-public partial class GenerateSpeechTests : SyncAsyncTestBase
+public partial class GenerateSpeechTests : OpenAIRecordedTestBase
 {
     public GenerateSpeechTests(bool isAsync) : base(isAsync)
     {
@@ -21,14 +19,12 @@ public GenerateSpeechTests(bool isAsync) : base(isAsync)
     [Test]
     public async Task BasicTextToSpeechWorks()
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_TTS);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_TTS);

-        BinaryData audio = IsAsync
-            ? await client.GenerateSpeechAsync("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer)
-            : client.GenerateSpeech("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer);
+        BinaryData audio = await client.GenerateSpeechAsync("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer);

         Assert.That(audio, Is.Not.Null);
-        ValidateGeneratedAudio(audio, "hello");
+        await ValidateGeneratedAudio(audio, "hello");
     }

     [Test]
@@ -41,7 +37,7 @@ public async Task BasicTextToSpeechWorks()
     [TestCase("pcm")]
     public async Task OutputFormatWorks(string responseFormat)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_TTS);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_TTS);

         SpeechGenerationOptions options = new();

@@ -59,9 +55,7 @@ public async Task OutputFormatWorks(string responseFormat)
             };
         }

-        BinaryData audio = IsAsync
-            ? await client.GenerateSpeechAsync("Hello, world!", GeneratedSpeechVoice.Alloy, options)
-            : client.GenerateSpeech("Hello, world!", GeneratedSpeechVoice.Alloy, options);
+        BinaryData audio = await client.GenerateSpeechAsync("Hello, world!", GeneratedSpeechVoice.Alloy, options);

         Assert.That(audio, Is.Not.Null);

@@ -82,10 +76,10 @@ public async Task OutputFormatWorks(string responseFormat)
         }
     }

-    private void ValidateGeneratedAudio(BinaryData audio, string expectedSubstring)
+    private async Task ValidateGeneratedAudio(BinaryData audio, string expectedSubstring)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
-        AudioTranscription transcription = client.TranscribeAudio(audio.ToStream(), "hello_world.wav");
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioTranscription transcription = await client.TranscribeAudioAsync(audio.ToStream(), "hello_world.wav");

         Assert.That(transcription.Text.ToLowerInvariant(), Contains.Substring(expectedSubstring));
     }
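For live/recorded tests the commit swaps GetTestClient for GetProxiedOpenAIClient on an OpenAIRecordedTestBase fixture, so the same async-only body runs on both pipelines and can be recorded or played back through the test proxy registered in dotnet-tools.json. A condensed sketch of the resulting shape, assuming the helpers used in the diff above; the fixture name, prompt, and file name are illustrative, and the exact using directives may differ:

using System;
using System.Threading.Tasks;
using Microsoft.ClientModel.TestFramework;
using NUnit.Framework;
using OpenAI.Audio;
using static OpenAI.Tests.TestHelpers;

namespace OpenAI.Tests.Audio;

// Hypothetical recorded fixture mirroring the converted GenerateSpeechTests.
[Category("Audio")]
public class ExampleRecordedSpeechTests : OpenAIRecordedTestBase
{
    public ExampleRecordedSpeechTests(bool isAsync) : base(isAsync)
    {
    }

    [Test]
    public async Task SpeechRoundTrips()
    {
        // GetProxiedOpenAIClient returns a client wired for sync/async proxying
        // and for record/playback via the test proxy.
        AudioClient ttsClient = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_TTS);
        BinaryData audio = await ttsClient.GenerateSpeechAsync("Hello, world!", GeneratedSpeechVoice.Shimmer);
        Assert.That(audio, Is.Not.Null);

        // Validation also goes through a proxied client and is simply awaited;
        // no IsAsync branching is needed anymore.
        AudioClient whisperClient = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);
        AudioTranscription transcription = await whisperClient.TranscribeAudioAsync(audio.ToStream(), "hello.wav");
        Assert.That(transcription.Text.ToLowerInvariant(), Does.Contain("hello"));
    }
}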

tests/Audio/TranscriptionMockTests.cs

Lines changed: 13 additions & 24 deletions
@@ -4,18 +4,17 @@
 using System.Linq;
 using System.Threading;
 using System.Threading.Tasks;
+using Microsoft.ClientModel.TestFramework;
+using Microsoft.ClientModel.TestFramework.Mocks;
 using NUnit.Framework;
 using OpenAI.Audio;
-using OpenAI.Tests.Utility;

 namespace OpenAI.Tests.Audio;

-[TestFixture(true)]
-[TestFixture(false)]
 [Parallelizable(ParallelScope.All)]
 [Category("Audio")]
 [Category("Smoke")]
-public partial class TranscriptionMockTests : SyncAsyncTestBase
+public partial class TranscriptionMockTests : ClientTestBase
 {
     private static readonly ApiKeyCredential s_fakeCredential = new ApiKeyCredential("key");

@@ -141,53 +140,43 @@ public async Task TranscribeAudioDeserializesSegment(AudioSourceKind audioSource
     [Test]
     public void TranscribeAudioFromStreamRespectsTheCancellationToken()
     {
-        AudioClient client = new AudioClient("model", s_fakeCredential);
+        AudioClient client = CreateProxyFromClient(new AudioClient("model", s_fakeCredential));
         using Stream stream = new MemoryStream();
         using CancellationTokenSource cancellationSource = new();
         cancellationSource.Cancel();

-        if (IsAsync)
-        {
-            Assert.That(async () => await client.TranscribeAudioAsync(stream, "filename", cancellationToken: cancellationSource.Token),
+        Assert.That(async () => await client.TranscribeAudioAsync(stream, "filename", cancellationToken: cancellationSource.Token),
             Throws.InstanceOf<OperationCanceledException>());
-        }
-        else
-        {
-            Assert.That(() => client.TranscribeAudio(stream, "filename", cancellationToken: cancellationSource.Token),
-                Throws.InstanceOf<OperationCanceledException>());
-        }
     }

     private OpenAIClientOptions GetClientOptionsWithMockResponse(int status, string content)
     {
-        MockPipelineResponse response = new MockPipelineResponse(status);
-        response.SetContent(content);
+        MockPipelineResponse response = new MockPipelineResponse(status).WithContent(content);

         return new OpenAIClientOptions()
         {
-            Transport = new MockPipelineTransport(response)
+            Transport = new MockPipelineTransport(_ => response)
+            {
+                ExpectSyncPipeline = !IsAsync
+            }
         };
     }

     private async ValueTask<AudioTranscription> InvokeTranscribeAudioSyncOrAsync(OpenAIClientOptions clientOptions, AudioSourceKind audioSourceKind)
     {
-        AudioClient client = new AudioClient("model", s_fakeCredential, clientOptions);
+        AudioClient client = CreateProxyFromClient(new AudioClient("model", s_fakeCredential, clientOptions));
         string filename = "audio_french.wav";
         string path = Path.Combine("Assets", filename);

         if (audioSourceKind == AudioSourceKind.UsingStream)
         {
             using FileStream audio = File.OpenRead(path);

-            return IsAsync
-                ? await client.TranscribeAudioAsync(audio, filename)
-                : client.TranscribeAudio(audio, filename);
+            return await client.TranscribeAudioAsync(audio, filename);
         }
         else if (audioSourceKind == AudioSourceKind.UsingFilePath)
         {
-            return IsAsync
-                ? await client.TranscribeAudioAsync(path)
-                : client.TranscribeAudio(path);
+            return await client.TranscribeAudioAsync(path);
         }

         Assert.Fail("Invalid source kind.");
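The mock tests also move to the framework's mock types: MockPipelineResponse gains its body through the fluent WithContent call, and MockPipelineTransport now takes a response factory plus an ExpectSyncPipeline flag so the transport can verify that the sync fixture run really goes through the sync pipeline. A minimal sketch of that wiring, assuming the types shown in the diff above; the fixture name and canned JSON body are illustrative:

using System.ClientModel;
using System.IO;
using System.Threading.Tasks;
using Microsoft.ClientModel.TestFramework;
using Microsoft.ClientModel.TestFramework.Mocks;
using NUnit.Framework;
using OpenAI;
using OpenAI.Audio;

namespace OpenAI.Tests.Audio;

// Hypothetical fixture showing the mock-transport wiring used in this commit.
public class ExampleMockTransportTests : ClientTestBase
{
    public ExampleMockTransportTests(bool isAsync) : base(isAsync)
    {
    }

    [Test]
    public async Task CannedResponseIsDeserialized()
    {
        // A canned 200 response; WithContent sets the body the client will parse.
        MockPipelineResponse response = new MockPipelineResponse(200)
            .WithContent("{\"text\":\"bonjour\"}");

        OpenAIClientOptions options = new()
        {
            // The factory returns the canned response for every request, and
            // ExpectSyncPipeline tells the transport which pipeline this
            // fixture variant (sync or async) is expected to use.
            Transport = new MockPipelineTransport(_ => response)
            {
                ExpectSyncPipeline = !IsAsync
            }
        };

        AudioClient client = CreateProxyFromClient(
            new AudioClient("model", new ApiKeyCredential("fake-key"), options));

        AudioTranscription transcription = await client.TranscribeAudioAsync(
            Path.Combine("Assets", "audio_french.wav"));

        Assert.That(transcription.Text, Does.Contain("bonjour"));
    }
}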

tests/Audio/TranscriptionTests.cs

Lines changed: 16 additions & 30 deletions
@@ -1,4 +1,5 @@
-using NUnit.Framework;
+using Microsoft.ClientModel.TestFramework;
+using NUnit.Framework;
 using OpenAI.Audio;
 using OpenAI.Tests.Utility;
 using System;
@@ -8,16 +9,12 @@
 using System.IO;
 using System.Text;
 using System.Threading.Tasks;
-using System.Transactions;
 using static OpenAI.Tests.TestHelpers;

 namespace OpenAI.Tests.Audio;

-[TestFixture(true)]
-[TestFixture(false)]
-[Parallelizable(ParallelScope.All)]
 [Category("Audio")]
-public partial class TranscriptionTests : SyncAsyncTestBase
+public partial class TranscriptionTests : OpenAIRecordedTestBase
 {
     public TranscriptionTests(bool isAsync) : base(isAsync)
     {
@@ -34,7 +31,7 @@ public enum AudioSourceKind
     [TestCase(AudioSourceKind.UsingFilePath)]
     public async Task TranscriptionWorks(AudioSourceKind audioSourceKind)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);
         string filename = "audio_hello_world.mp3";
         string path = Path.Combine("Assets", filename);
         AudioTranscription transcription = null;
@@ -43,15 +40,11 @@ public async Task TranscriptionWorks(AudioSourceKind audioSourceKind)
         {
             using FileStream inputStream = File.OpenRead(path);

-            transcription = IsAsync
-                ? await client.TranscribeAudioAsync(inputStream, filename)
-                : client.TranscribeAudio(inputStream, filename);
+            transcription = await client.TranscribeAudioAsync(inputStream, filename);
         }
         else if (audioSourceKind == AudioSourceKind.UsingFilePath)
         {
-            transcription = IsAsync
-                ? await client.TranscribeAudioAsync(path)
-                : client.TranscribeAudio(path);
+            transcription = await client.TranscribeAudioAsync(path);
         }

         Assert.That(transcription, Is.Not.Null);
@@ -65,7 +58,7 @@ public async Task TranscriptionWorks(AudioSourceKind audioSourceKind)
     [TestCase(AudioTimestampGranularities.Word | AudioTimestampGranularities.Segment)]
     public async Task TimestampsWork(AudioTimestampGranularities granularityFlags)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);

         using FileStream inputStream = File.OpenRead(Path.Combine("Assets", "audio_hello_world.mp3"));

@@ -76,9 +69,7 @@ public async Task TimestampsWork(AudioTimestampGranularities granularityFlags)
             TimestampGranularities = granularityFlags,
         };

-        ClientResult<AudioTranscription> transcriptionResult = IsAsync
-            ? await client.TranscribeAudioAsync(inputStream, "audio_hello_world.mp3", options)
-            : client.TranscribeAudio(inputStream, "audio_hello_world.mp3", options);
+        ClientResult<AudioTranscription> transcriptionResult = await client.TranscribeAudioAsync(inputStream, "audio_hello_world.mp3", options);

         PipelineResponse rawResponse = transcriptionResult.GetRawResponse();
         Assert.That(rawResponse.Content.ToString(), Is.Not.Null.And.Not.Empty);
@@ -140,7 +131,7 @@ public async Task TimestampsWork(AudioTimestampGranularities granularityFlags)
     [TestCase(null)]
     public async Task TranscriptionFormatsWork(string responseFormat)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);
         string path = Path.Combine("Assets", "audio_hello_world.mp3");

         AudioTranscriptionOptions options = new()
@@ -156,9 +147,7 @@ public async Task TranscriptionFormatsWork(string responseFormat)
             }
         };

-        AudioTranscription transcription = IsAsync
-            ? await client.TranscribeAudioAsync(path, options)
-            : client.TranscribeAudio(path, options);
+        AudioTranscription transcription = await client.TranscribeAudioAsync(path, options);

         Assert.That(transcription?.Text?.ToLowerInvariant(), Does.Contain("hello"));

@@ -180,7 +169,6 @@ public async Task TranscriptionFormatsWork(string responseFormat)

             Assert.That(segment.Id, Is.EqualTo(i));
             Assert.That(segment.EndTime, Is.GreaterThanOrEqualTo(segment.StartTime));
-            Assert.That(segment.TokenIds, Is.Not.Null);
             Assert.That(segment.TokenIds.Length, Is.GreaterThan(0));

             Assert.That(segment.AverageLogProbability, Is.LessThan(-0.001f).Or.GreaterThan(0.001f));
@@ -200,7 +188,7 @@ public async Task TranscriptionFormatsWork(string responseFormat)
     [Test]
     public async Task IncludesWork()
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
         string filename = "audio_hello_world.mp3";
         string path = Path.Combine("Assets", filename);

@@ -218,7 +206,7 @@ public async Task IncludesWork()
     [Test]
     public async Task StreamingIncludesWork()
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
         string filename = "audio_hello_world.mp3";
         string path = Path.Combine("Assets", filename);

@@ -252,7 +240,7 @@ in client.TranscribeAudioStreamingAsync(
     [Test]
     public async Task BadTranscriptionRequest()
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);

         string path = Path.Combine("Assets", "audio_hello_world.mp3");

@@ -265,9 +253,7 @@ public async Task BadTranscriptionRequest()

         try
         {
-            _ = IsAsync
-                ? await client.TranscribeAudioAsync(path, options)
-                : client.TranscribeAudio(path, options);
+            await client.TranscribeAudioAsync(path, options);
         }
         catch (Exception ex)
         {
@@ -283,7 +269,7 @@ public async Task BadTranscriptionRequest()
     [TestCase(AudioSourceKind.UsingFilePath)]
     public async Task StreamingTranscriptionWorks(AudioSourceKind audioSourceKind)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Gpt_4o_Mini_Transcribe);
         string filename = "audio_hello_world.mp3";
         string path = Path.Combine("Assets", filename);

@@ -328,7 +314,7 @@ public async Task StreamingTranscriptionWorks(AudioSourceKind audioSourceKind)
     [TestCase(AudioSourceKind.UsingFilePath)]
     public void StreamingTranscriptionThrowsForWhisperModel(AudioSourceKind audioSourceKind)
     {
-        AudioClient client = GetTestClient<AudioClient>(TestScenario.Audio_Whisper);
+        AudioClient client = GetProxiedOpenAIClient<AudioClient>(TestScenario.Audio_Whisper);
         string filename = "audio_hello_world.mp3";
         string path = Path.Combine("Assets", filename);
0 commit comments
