Skip to content

Commit 36e3d0f

Browse files
committed
fix(images): correct variation API return type to List<OpenAIImageData>
- Change variation() return type from List<OpenAIImageModel> to List<OpenAIImageData> - Update interface and implementation to match actual API response structure - Simplify response parsing by directly mapping data array to OpenAIImageData - Fix example usage to work with correct return type - Update tests: remove deprecated model.permission checks; fix audio transcription type assertions; correct file.list() return type handling
1 parent 457ff13 commit 36e3d0f

File tree

4 files changed

+46
-67
lines changed

4 files changed

+46
-67
lines changed

example/lib/image_variation_example.dart

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ Future<void> main() async {
1818
responseFormat: OpenAIImageResponseFormat.url,
1919
);
2020

21-
final list = imageVariations.firstOrNull?.data ?? [];
21+
final list = imageVariations;
2222

2323
for (var index = 0; index < list.length; index++) {
2424
final currentItem = list[index];

lib/src/core/base/images/interfaces/variations.dart

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import 'dart:io';
33
import '../../../../../dart_openai.dart';
44

55
abstract class VariationInterface {
6-
Future<List<OpenAIImageModel>> variation({
6+
Future<List<OpenAIImageData>> variation({
77
required File image,
88
int? n,
99
OpenAIImageSize? size,

lib/src/instance/images/images.dart

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,7 @@ interface class OpenAIImages implements OpenAIImagesBase {
234234
/// );
235235
/// ```
236236
@override
237-
Future<List<OpenAIImageModel>> variation({
237+
Future<List<OpenAIImageData>> variation({
238238
required File image,
239239
String? model,
240240
int? n,
@@ -245,7 +245,7 @@ interface class OpenAIImages implements OpenAIImagesBase {
245245
final String variations = "/variations";
246246

247247
return await OpenAINetworkingClient.imageVariationForm<
248-
List<OpenAIImageModel>>(
248+
List<OpenAIImageData>>(
249249
image: image,
250250
body: {
251251
if (model != null) "model": model,
@@ -255,16 +255,10 @@ interface class OpenAIImages implements OpenAIImagesBase {
255255
if (user != null) "user": user,
256256
},
257257
onSuccess: (Map<String, dynamic> response) {
258-
if (response.containsKey("data")) {
259-
return [
260-
...(response["data"] as List).map(
261-
(e) => OpenAIImageModel.fromMap(e),
262-
),
263-
];
264-
}
265-
266258
return [
267-
OpenAIImageModel.fromMap(response),
259+
...(response["data"] as List).map(
260+
(e) => OpenAIImageData.fromMap(e),
261+
),
268262
];
269263
},
270264
to: BaseApiUrlBuilder.build(endpoint + variations),

test/openai_test.dart

Lines changed: 39 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import 'package:dart_openai/dart_openai.dart';
99
import 'package:dart_openai/src/core/builder/headers.dart';
1010
import 'package:dart_openai/src/core/constants/config.dart';
1111
import 'package:dart_openai/src/core/constants/strings.dart';
12+
import 'package:dart_openai/src/core/models/file/file_list.dart';
1213
import 'package:dart_openai/src/core/models/model/sub_models/permission.dart';
1314
import 'package:dart_openai/src/core/utils/logger.dart';
1415
import 'package:http/http.dart' as http;
@@ -118,15 +119,6 @@ void main() async {
118119

119120
final someModel = models.first;
120121

121-
if (someModel.permission != null &&
122-
someModel.permission!.isNotEmpty) {
123-
final permission = someModel.permission!.first;
124-
125-
expect(permission, isA<OpenAIModelModelPermission>());
126-
127-
expect(permission.id, isNotNull);
128-
}
129-
130122
modelExampleId = models
131123
.firstWhereOrNull(
132124
(element) => element.id.contains("gpt-3"),
@@ -147,21 +139,6 @@ void main() async {
147139
expect(model, isA<OpenAIModelModel>());
148140

149141
expect(model.id, isNotNull);
150-
151-
if (model.permission != null) {
152-
expect(
153-
model.permission,
154-
isA<List<OpenAIModelModelPermission>>(),
155-
);
156-
157-
if (model.permission!.isNotEmpty) {
158-
final permission = model.permission!.first;
159-
160-
expect(permission, isA<OpenAIModelModelPermission>());
161-
162-
expect(permission.id, isNotNull);
163-
}
164-
}
165142
});
166143
});
167144

@@ -391,12 +368,13 @@ void main() async {
391368
expect(imageEdited.data.first.url, isA<String>());
392369
});
393370
test("variation", () async {
394-
final OpenAIImageModel variation = await OpenAI.instance.image.variation(
371+
final List<OpenAIImageData> variation =
372+
await OpenAI.instance.image.variation(
395373
image: imageFileExample,
396374
);
397375

398376
expect(variation, isA<OpenAIImageModel>());
399-
expect(variation.data.first.url, isA<String>());
377+
expect(variation.first.url, isA<String>());
400378
});
401379
});
402380

@@ -414,7 +392,7 @@ void main() async {
414392

415393
test('create with smaller dimensions', () async {
416394
final OpenAIEmbeddingsModel embedding =
417-
await OpenAI.instance.embedding.create(
395+
await OpenAI.instance.embedding.create(
418396
model: "text-embedding-3-large",
419397
input: "This is a sample text",
420398
dimensions: 1000,
@@ -438,8 +416,12 @@ void main() async {
438416
model: "whisper-1",
439417
responseFormat: OpenAIAudioResponseFormat.json,
440418
);
441-
expect(transcription, isA<OpenAIAudioModel>());
442-
expect(transcription.text, isA<String>());
419+
420+
if (transcription is OpenAITranscriptionVerboseModel) {
421+
expect(transcription.language, isA<String?>());
422+
} else if (transcription is OpenAITranscriptionModel) {
423+
expect(transcription.text, isA<String?>());
424+
}
443425
});
444426

445427
test("create transcription with timestamp granularity", () async {
@@ -452,12 +434,16 @@ void main() async {
452434
file: audioExampleFile,
453435
model: "whisper-1",
454436
responseFormat: OpenAIAudioResponseFormat.verbose_json,
455-
timestamp_granularities: [OpenAIAudioTimestampGranularity.word],
437+
timestampGranularities: [OpenAIAudioTimestampGranularity.word],
456438
);
457439

458-
expect(transcription, isA<OpenAIAudioModel>());
459-
expect(transcription.text, isA<String>());
460-
expect(transcription.words, isA<List>());
440+
expect(transcription, isA<OpenAITranscriptionGeneralModel>());
441+
442+
if (transcription is OpenAITranscriptionVerboseModel) {
443+
expect(transcription.language, isA<String?>());
444+
} else if (transcription is OpenAITranscriptionModel) {
445+
expect(transcription.text, isA<String?>());
446+
}
461447
});
462448
test("create translation", () async {
463449
final audioExampleFile = await getFileFromUrl(
@@ -470,8 +456,7 @@ void main() async {
470456
model: "whisper-1",
471457
);
472458

473-
expect(translation, isA<OpenAIAudioModel>());
474-
expect(translation.text, isA<String>());
459+
expect(translation, isA<String>());
475460
});
476461

477462
test("create transcription with auto chunking strategy", () async {
@@ -488,10 +473,11 @@ void main() async {
488473
model: "whisper-1",
489474
chunkingStrategy: chunkingStrategy,
490475
);
491-
492-
// Assert
493-
expect(transcription, isA<OpenAIAudioModel>());
494-
expect(transcription.text, isA<String>());
476+
if (transcription is OpenAITranscriptionVerboseModel) {
477+
expect(transcription.language, isA<String?>());
478+
} else if (transcription is OpenAITranscriptionModel) {
479+
expect(transcription.text, isA<String?>());
480+
}
495481
});
496482

497483
test("create transcription with server VAD chunking strategy", () async {
@@ -513,9 +499,11 @@ void main() async {
513499
chunkingStrategy: chunkingStrategy,
514500
);
515501

516-
// Assert
517-
expect(transcription, isA<OpenAIAudioModel>());
518-
expect(transcription.text, isA<String>());
502+
if (transcription is OpenAITranscriptionVerboseModel) {
503+
expect(transcription.language, isA<String?>());
504+
} else if (transcription is OpenAITranscriptionModel) {
505+
expect(transcription.text, isA<String?>());
506+
}
519507
});
520508

521509
test("create translation with auto chunking strategy", () async {
@@ -530,12 +518,9 @@ void main() async {
530518
final translation = await OpenAI.instance.audio.createTranslation(
531519
file: audioExampleFile,
532520
model: "whisper-1",
533-
chunkingStrategy: chunkingStrategy,
534521
);
535522

536-
// Assert
537-
expect(translation, isA<OpenAIAudioModel>());
538-
expect(translation.text, isA<String>());
523+
expect(translation, isA<String>());
539524
});
540525
});
541526

@@ -551,18 +536,18 @@ void main() async {
551536
expect(file.id, isNotNull);
552537
});
553538
test("list", () async {
554-
final List<OpenAIFileModel> files = await OpenAI.instance.file.list();
555-
expect(files, isA<List<OpenAIFileModel>>());
556-
if (files.isNotEmpty) {
557-
expect(files.first, isA<OpenAIFileModel>());
558-
expect(files.first.id, isNotNull);
559-
expect(files.first.id, isA<String>());
539+
OpenAIFileListModel files = await OpenAI.instance.file.list();
540+
541+
if (files.data.isNotEmpty) {
542+
expect(files.data.first, isA<OpenAIFileModel>());
543+
expect(files.data.first.id, isNotNull);
544+
expect(files.data.first.id, isA<String>());
560545

561546
// get the id of the file that we uploaded in the previous test.
562-
fileIdFromFilesApi = files
547+
fileIdFromFilesApi = files.data
563548
.firstWhere((element) => element.fileName.contains("example.jsonl"))
564549
.id;
565-
fileToDelete = files.last.id;
550+
fileToDelete = files.data.last.id;
566551
}
567552
});
568553

0 commit comments

Comments
 (0)