Skip to content

Commit 4e39390

Browse files
committed
test: Add comprehensive dispatch logic tests for GenerativeModel
This commit adds a new test suite to verify that the GenerativeModel's methods correctly dispatch requests to either the on-device or cloud backends based on the selected InferenceMode. It covers generateContent, generateContentStream, and countTokens.
1 parent 8ebbae4 commit 4e39390

File tree

3 files changed

+252
-7
lines changed

3 files changed

+252
-7
lines changed

packages/ai/src/methods/helpers.test.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,9 @@ describe('callCloudOrDevice', () => {
162162
});
163163

164164
it('should fall back to onDeviceCall if inCloudCall fails with AIError', async () => {
165-
inCloudCall.rejects(new AIError(AIErrorCode.FETCH_ERROR, 'Network error'));
165+
inCloudCall.rejects(
166+
new AIError(AIErrorCode.FETCH_ERROR, 'Network error')
167+
);
166168
const result = await callCloudOrDevice(
167169
request,
168170
chromeAdapter,

packages/ai/src/methods/helpers.ts

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,7 @@
1616
*/
1717

1818
import { AIError } from '../errors';
19-
import {
20-
GenerateContentRequest,
21-
InferenceMode
22-
} from '../types';
19+
import { GenerateContentRequest, InferenceMode } from '../types';
2320
import { ChromeAdapter } from '../types/chrome-adapter';
2421

2522
/**

packages/ai/src/models/generative-model.test.ts

Lines changed: 248 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,20 @@
1616
*/
1717
import { use, expect } from 'chai';
1818
import { GenerativeModel } from './generative-model';
19-
import { FunctionCallingMode, AI, InferenceMode } from '../public-types';
19+
import {
20+
FunctionCallingMode,
21+
AI,
22+
InferenceMode,
23+
AIErrorCode
24+
} from '../public-types';
2025
import * as request from '../requests/request';
21-
import { match, restore, stub } from 'sinon';
26+
import { SinonStub, match, restore, stub } from 'sinon';
2227
import { getMockResponse } from '../../test-utils/mock-response';
2328
import sinonChai from 'sinon-chai';
2429
import { VertexAIBackend } from '../backend';
30+
import { ChromeAdapter } from '../types/chrome-adapter';
2531
import { ChromeAdapterImpl } from '../methods/chrome-adapter';
32+
import { AIError } from '../errors';
2633

2734
use(sinonChai);
2835

@@ -406,3 +413,242 @@ describe('GenerativeModel', () => {
406413
restore();
407414
});
408415
});
416+
417+
describe('GenerativeModel dispatch logic', () => {
418+
let makeRequestStub: SinonStub;
419+
let mockChromeAdapter: ChromeAdapter;
420+
421+
beforeEach(() => {
422+
makeRequestStub = stub(request, 'makeRequest').resolves(
423+
getMockResponse(
424+
'vertexAI',
425+
'unary-success-basic-reply-short.json'
426+
) as Response
427+
);
428+
mockChromeAdapter = {
429+
isAvailable: stub(),
430+
generateContent: stub().resolves({} as Response),
431+
generateContentStream: stub().resolves({} as Response),
432+
countTokens: stub().resolves({} as Response),
433+
mode: InferenceMode.PREFER_ON_DEVICE
434+
};
435+
});
436+
437+
afterEach(() => {
438+
restore();
439+
});
440+
441+
describe('PREFER_ON_DEVICE', () => {
442+
beforeEach(() => {
443+
mockChromeAdapter.mode = InferenceMode.PREFER_ON_DEVICE;
444+
});
445+
it('should use on-device for generateContent when available', async () => {
446+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
447+
const model = new GenerativeModel(
448+
fakeAI,
449+
{ model: 'model' },
450+
{},
451+
mockChromeAdapter
452+
);
453+
await model.generateContent('hello');
454+
expect(mockChromeAdapter.generateContent).to.have.been.calledOnce;
455+
expect(makeRequestStub).to.not.have.been.called;
456+
});
457+
it('should use cloud for generateContent when on-device is not available', async () => {
458+
(mockChromeAdapter.isAvailable as SinonStub).resolves(false);
459+
const model = new GenerativeModel(
460+
fakeAI,
461+
{ model: 'model' },
462+
{},
463+
mockChromeAdapter
464+
);
465+
await model.generateContent('hello');
466+
expect(mockChromeAdapter.generateContent).to.not.have.been.called;
467+
expect(makeRequestStub).to.have.been.calledOnce;
468+
});
469+
it('should use on-device for generateContentStream when available', async () => {
470+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
471+
const model = new GenerativeModel(
472+
fakeAI,
473+
{ model: 'model' },
474+
{},
475+
mockChromeAdapter
476+
);
477+
await model.generateContentStream('hello');
478+
expect(mockChromeAdapter.generateContentStream).to.have.been.calledOnce;
479+
expect(makeRequestStub).to.not.have.been.called;
480+
});
481+
it('should use cloud for generateContentStream when on-device is not available', async () => {
482+
(mockChromeAdapter.isAvailable as SinonStub).resolves(false);
483+
const model = new GenerativeModel(
484+
fakeAI,
485+
{ model: 'model' },
486+
{},
487+
mockChromeAdapter
488+
);
489+
await model.generateContentStream('hello');
490+
expect(mockChromeAdapter.generateContentStream).to.not.have.been.called;
491+
expect(makeRequestStub).to.have.been.calledOnce;
492+
});
493+
it('should use cloud for countTokens', async () => {
494+
const model = new GenerativeModel(
495+
fakeAI,
496+
{ model: 'model' },
497+
{},
498+
mockChromeAdapter
499+
);
500+
await model.countTokens('hello');
501+
expect(makeRequestStub).to.have.been.calledOnce;
502+
});
503+
});
504+
505+
describe('ONLY_ON_DEVICE', () => {
506+
beforeEach(() => {
507+
mockChromeAdapter.mode = InferenceMode.ONLY_ON_DEVICE;
508+
});
509+
it('should use on-device for generateContent when available', async () => {
510+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
511+
const model = new GenerativeModel(
512+
fakeAI,
513+
{ model: 'model' },
514+
{},
515+
mockChromeAdapter
516+
);
517+
await model.generateContent('hello');
518+
expect(mockChromeAdapter.generateContent).to.have.been.calledOnce;
519+
expect(makeRequestStub).to.not.have.been.called;
520+
});
521+
it('should use cloud for generateContent when on-device is not available', async () => {
522+
(mockChromeAdapter.isAvailable as SinonStub).resolves(false);
523+
const model = new GenerativeModel(
524+
fakeAI,
525+
{ model: 'model' },
526+
{},
527+
mockChromeAdapter
528+
);
529+
await model.generateContent('hello');
530+
expect(mockChromeAdapter.generateContent).to.not.have.been.called;
531+
expect(makeRequestStub).to.have.been.calledOnce;
532+
});
533+
it('should throw for countTokens', async () => {
534+
const model = new GenerativeModel(
535+
fakeAI,
536+
{ model: 'model' },
537+
{},
538+
mockChromeAdapter
539+
);
540+
await expect(model.countTokens('hello')).to.be.rejectedWith(
541+
/countTokens\(\) is not supported for on-device models/
542+
);
543+
expect(makeRequestStub).to.not.have.been.called;
544+
});
545+
});
546+
547+
describe('ONLY_IN_CLOUD', () => {
548+
beforeEach(() => {
549+
mockChromeAdapter.mode = InferenceMode.ONLY_IN_CLOUD;
550+
});
551+
it('should use cloud for generateContent even when on-device is available', async () => {
552+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
553+
const model = new GenerativeModel(
554+
fakeAI,
555+
{ model: 'model' },
556+
{},
557+
mockChromeAdapter
558+
);
559+
await model.generateContent('hello');
560+
expect(mockChromeAdapter.generateContent).to.not.have.been.called;
561+
expect(makeRequestStub).to.have.been.calledOnce;
562+
});
563+
it('should use cloud for generateContentStream even when on-device is available', async () => {
564+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
565+
const model = new GenerativeModel(
566+
fakeAI,
567+
{ model: 'model' },
568+
{},
569+
mockChromeAdapter
570+
);
571+
await model.generateContentStream('hello');
572+
expect(mockChromeAdapter.generateContentStream).to.not.have.been.called;
573+
expect(makeRequestStub).to.have.been.calledOnce;
574+
});
575+
it('should use cloud for countTokens', async () => {
576+
const model = new GenerativeModel(
577+
fakeAI,
578+
{ model: 'model' },
579+
{},
580+
mockChromeAdapter
581+
);
582+
await model.countTokens('hello');
583+
expect(makeRequestStub).to.have.been.calledOnce;
584+
});
585+
});
586+
587+
describe('PREFER_IN_CLOUD', () => {
588+
beforeEach(() => {
589+
mockChromeAdapter.mode = InferenceMode.PREFER_IN_CLOUD;
590+
});
591+
it('should use cloud for generateContent when available', async () => {
592+
const model = new GenerativeModel(
593+
fakeAI,
594+
{ model: 'model' },
595+
{},
596+
mockChromeAdapter
597+
);
598+
await model.generateContent('hello');
599+
expect(makeRequestStub).to.have.been.calledOnce;
600+
expect(mockChromeAdapter.generateContent).to.not.have.been.called;
601+
});
602+
it('should fall back to on-device for generateContent if cloud fails', async () => {
603+
makeRequestStub.rejects(
604+
new AIError(AIErrorCode.FETCH_ERROR, 'Network error')
605+
);
606+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
607+
const model = new GenerativeModel(
608+
fakeAI,
609+
{ model: 'model' },
610+
{},
611+
mockChromeAdapter
612+
);
613+
await model.generateContent('hello');
614+
expect(makeRequestStub).to.have.been.calledOnce;
615+
expect(mockChromeAdapter.generateContent).to.have.been.calledOnce;
616+
});
617+
it('should use cloud for generateContentStream when available', async () => {
618+
const model = new GenerativeModel(
619+
fakeAI,
620+
{ model: 'model' },
621+
{},
622+
mockChromeAdapter
623+
);
624+
await model.generateContentStream('hello');
625+
expect(makeRequestStub).to.have.been.calledOnce;
626+
expect(mockChromeAdapter.generateContentStream).to.not.have.been.called;
627+
});
628+
it('should fall back to on-device for generateContentStream if cloud fails', async () => {
629+
makeRequestStub.rejects(
630+
new AIError(AIErrorCode.FETCH_ERROR, 'Network error')
631+
);
632+
(mockChromeAdapter.isAvailable as SinonStub).resolves(true);
633+
const model = new GenerativeModel(
634+
fakeAI,
635+
{ model: 'model' },
636+
{},
637+
mockChromeAdapter
638+
);
639+
await model.generateContentStream('hello');
640+
expect(makeRequestStub).to.have.been.calledOnce;
641+
expect(mockChromeAdapter.generateContentStream).to.have.been.calledOnce;
642+
});
643+
it('should use cloud for countTokens', async () => {
644+
const model = new GenerativeModel(
645+
fakeAI,
646+
{ model: 'model' },
647+
{},
648+
mockChromeAdapter
649+
);
650+
await model.countTokens('hello');
651+
expect(makeRequestStub).to.have.been.calledOnce;
652+
});
653+
});
654+
});

0 commit comments

Comments (0)