Skip to content

Commit d8d5a52

Browse files
test(ai): update unit tests to use updated mocks
1 parent dd8782a commit d8d5a52

File tree

4 files changed

+54
-17
lines changed

4 files changed

+54
-17
lines changed

packages/ai/__tests__/generative-model.test.ts

Lines changed: 26 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ import { type ReactNativeFirebase } from '@react-native-firebase/app';
1919
import { GenerativeModel } from '../lib/models/generative-model';
2020
import { AI, FunctionCallingMode } from '../lib/public-types';
2121
import * as request from '../lib/requests/request';
22-
import { getMockResponse } from './test-utils/mock-response';
22+
import { BackendName, getMockResponse } from './test-utils/mock-response';
2323
import { VertexAIBackend } from '../lib/backend';
2424

2525
const fakeAI: AI = {
@@ -56,7 +56,10 @@ describe('GenerativeModel', () => {
5656
expect(genModel.tools?.length).toBe(1);
5757
expect(genModel.toolConfig?.functionCallingConfig?.mode).toBe(FunctionCallingMode.NONE);
5858
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
59-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
59+
const mockResponse = getMockResponse(
60+
BackendName.VertexAI,
61+
'unary-success-basic-reply-short.json',
62+
);
6063
const makeRequestStub = jest
6164
.spyOn(request, 'makeRequest')
6265
.mockResolvedValue(mockResponse as Response);
@@ -78,7 +81,10 @@ describe('GenerativeModel', () => {
7881
systemInstruction: 'be friendly',
7982
});
8083
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
81-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
84+
const mockResponse = getMockResponse(
85+
BackendName.VertexAI,
86+
'unary-success-basic-reply-short.json',
87+
);
8288
const makeRequestStub = jest
8389
.spyOn(request, 'makeRequest')
8490
.mockResolvedValue(mockResponse as Response);
@@ -113,7 +119,10 @@ describe('GenerativeModel', () => {
113119
expect(genModel.tools?.length).toBe(1);
114120
expect(genModel.toolConfig?.functionCallingConfig?.mode).toBe(FunctionCallingMode.NONE);
115121
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
116-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
122+
const mockResponse = getMockResponse(
123+
BackendName.VertexAI,
124+
'unary-success-basic-reply-short.json',
125+
);
117126
const makeRequestStub = jest
118127
.spyOn(request, 'makeRequest')
119128
.mockResolvedValue(mockResponse as Response);
@@ -178,7 +187,10 @@ describe('GenerativeModel', () => {
178187
expect(genModel.tools?.length).toBe(1);
179188
expect(genModel.toolConfig?.functionCallingConfig?.mode).toBe(FunctionCallingMode.NONE);
180189
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
181-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
190+
const mockResponse = getMockResponse(
191+
BackendName.VertexAI,
192+
'unary-success-basic-reply-short.json',
193+
);
182194
const makeRequestStub = jest
183195
.spyOn(request, 'makeRequest')
184196
.mockResolvedValue(mockResponse as Response);
@@ -200,7 +212,10 @@ describe('GenerativeModel', () => {
200212
systemInstruction: 'be friendly',
201213
});
202214
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
203-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
215+
const mockResponse = getMockResponse(
216+
BackendName.VertexAI,
217+
'unary-success-basic-reply-short.json',
218+
);
204219
const makeRequestStub = jest
205220
.spyOn(request, 'makeRequest')
206221
.mockResolvedValue(mockResponse as Response);
@@ -226,7 +241,10 @@ describe('GenerativeModel', () => {
226241
expect(genModel.tools?.length).toBe(1);
227242
expect(genModel.toolConfig?.functionCallingConfig?.mode).toBe(FunctionCallingMode.NONE);
228243
expect(genModel.systemInstruction?.parts[0]!.text).toBe('be friendly');
229-
const mockResponse = getMockResponse('unary-success-basic-reply-short.json');
244+
const mockResponse = getMockResponse(
245+
BackendName.VertexAI,
246+
'unary-success-basic-reply-short.json',
247+
);
230248
const makeRequestStub = jest
231249
.spyOn(request, 'makeRequest')
232250
.mockResolvedValue(mockResponse as Response);
@@ -256,7 +274,7 @@ describe('GenerativeModel', () => {
256274

257275
it('calls countTokens', async () => {
258276
const genModel = new GenerativeModel(fakeAI, { model: 'my-model' });
259-
const mockResponse = getMockResponse('unary-success-total-tokens.json');
277+
const mockResponse = getMockResponse(BackendName.VertexAI, 'unary-success-total-tokens.json');
260278
const makeRequestStub = jest
261279
.spyOn(request, 'makeRequest')
262280
.mockResolvedValue(mockResponse as Response);

packages/ai/__tests__/googleai-mappers.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ import {
4141
PromptFeedback,
4242
SafetyRating,
4343
} from '../lib/public-types';
44-
import { getMockResponse } from './test-utils/mock-response';
44+
import { BackendName, getMockResponse } from './test-utils/mock-response';
4545
import { SpiedFunction } from 'jest-mock';
4646

4747
const fakeModel = 'models/gemini-pro';
@@ -129,7 +129,7 @@ describe('Google AI Mappers', () => {
129129
describe('mapGenerateContentResponse', () => {
130130
it('should map a full Google AI response', async () => {
131131
const googleAIMockResponse: GoogleAIGenerateContentResponse = await (
132-
getMockResponse('unary-success-citations.json') as Response
132+
getMockResponse(BackendName.GoogleAI, 'unary-success-citations.json') as Response
133133
).json();
134134
const mappedResponse = mapGenerateContentResponse(googleAIMockResponse);
135135

packages/ai/__tests__/request.test.ts

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ import { ApiSettings } from '../lib/types/internal';
2020
import { DEFAULT_API_VERSION } from '../lib/constants';
2121
import { AIErrorCode } from '../lib/types';
2222
import { AIError } from '../lib/errors';
23-
import { getMockResponse } from './test-utils/mock-response';
23+
import { BackendName, getMockResponse } from './test-utils/mock-response';
2424
import { VertexAIBackend } from '../lib/backend';
2525

2626
const fakeApiSettings: ApiSettings = {
@@ -350,7 +350,10 @@ describe('request methods', () => {
350350
});
351351

352352
it('Network error, API not enabled', async () => {
353-
const mockResponse = getMockResponse('unary-failure-firebasevertexai-api-not-enabled.json');
353+
const mockResponse = getMockResponse(
354+
BackendName.VertexAI,
355+
'unary-failure-firebasevertexai-api-not-enabled.json',
356+
);
354357
const fetchMock = jest.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse as Response);
355358
try {
356359
await makeRequest('models/model-name', Task.GENERATE_CONTENT, fakeApiSettings, false, '');

packages/ai/__tests__/stream-reader.test.ts

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,11 @@ import {
2222
processStream,
2323
} from '../lib/requests/stream-reader';
2424

25-
import { getChunkedStream, getMockResponseStreaming } from './test-utils/mock-response';
25+
import {
26+
BackendName,
27+
getChunkedStream,
28+
getMockResponseStreaming,
29+
} from './test-utils/mock-response';
2630
import {
2731
BlockReason,
2832
FinishReason,
@@ -95,7 +99,10 @@ describe('stream-reader', () => {
9599
});
96100

97101
it('streaming response - short', async () => {
98-
const fakeResponse = getMockResponseStreaming('streaming-success-basic-reply-short.txt');
102+
const fakeResponse = getMockResponseStreaming(
103+
BackendName.VertexAI,
104+
'streaming-success-basic-reply-short.txt',
105+
);
99106
const result = processStream(fakeResponse as Response, fakeApiSettings);
100107
for await (const response of result.stream) {
101108
expect(response.text()).not.toBe('');
@@ -105,7 +112,10 @@ describe('stream-reader', () => {
105112
});
106113

107114
it('streaming response - functioncall', async () => {
108-
const fakeResponse = getMockResponseStreaming('streaming-success-function-call-short.txt');
115+
const fakeResponse = getMockResponseStreaming(
116+
BackendName.VertexAI,
117+
'streaming-success-function-call-short.txt',
118+
);
109119
const result = processStream(fakeResponse as Response, fakeApiSettings);
110120
for await (const response of result.stream) {
111121
expect(response.text()).toBe('');
@@ -127,7 +137,10 @@ describe('stream-reader', () => {
127137
});
128138

129139
it('handles citations', async () => {
130-
const fakeResponse = getMockResponseStreaming('streaming-success-citations.txt');
140+
const fakeResponse = getMockResponseStreaming(
141+
BackendName.VertexAI,
142+
'streaming-success-citations.txt',
143+
);
131144
const result = processStream(fakeResponse as Response, fakeApiSettings);
132145
const aggregatedResponse = await result.response;
133146
expect(aggregatedResponse.text()).toContain('Quantum mechanics is');
@@ -143,7 +156,10 @@ describe('stream-reader', () => {
143156
});
144157

145158
it('removes empty text parts', async () => {
146-
const fakeResponse = getMockResponseStreaming('streaming-success-empty-text-part.txt');
159+
const fakeResponse = getMockResponseStreaming(
160+
BackendName.VertexAI,
161+
'streaming-success-empty-text-part.txt',
162+
);
147163
const result = processStream(fakeResponse as Response, fakeApiSettings);
148164
const aggregatedResponse = await result.response;
149165
expect(aggregatedResponse.text()).toBe('1');

0 commit comments

Comments (0)