
Commit 058027b

chore(tests): fixed tests
Signed-off-by: Evzen Gasta <[email protected]>
1 parent 04cff75 commit 058027b

12 files changed: 171 additions, 2 deletions

packages/backend/src/managers/modelsManager.spec.ts

Lines changed: 2 additions & 0 deletions
@@ -141,6 +141,7 @@ beforeEach(() => {
   modelsPath: '~/downloads',
   experimentalTuning: false,
   apiPort: 0,
+  inferenceRuntime: 'llama-cpp',
   experimentalGPU: false,
   showGPUPromotion: false,
   appearance: 'dark',
@@ -1007,6 +1008,7 @@ describe('uploadModelToPodmanMachine', () => {
   modelsPath: '~/downloads',
   experimentalTuning: false,
   apiPort: 0,
+  inferenceRuntime: 'llama-cpp',
   experimentalGPU: false,
   showGPUPromotion: false,
   appearance: 'dark',
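
Every hunk in this commit makes the same change: the mocked ExtensionConfiguration gains an inferenceRuntime field set to 'llama-cpp'. For orientation, a minimal sketch of the shape those mocks imply; the real interface lives in @shared/models/IExtensionConfiguration and may name, type, or order these fields differently.

// Hypothetical reconstruction of ExtensionConfiguration from the mocked values in this diff.
interface ExtensionConfiguration {
  modelsPath: string;           // '~/downloads', 'model-path', or '' in the mocks
  experimentalGPU: boolean;
  experimentalTuning: boolean;
  apiPort: number;              // 0, -1, or 10434 depending on the test
  inferenceRuntime: string;     // the new field these tests now have to provide, e.g. 'llama-cpp'
  modelUploadDisabled: boolean;
  showGPUPromotion: boolean;
  appearance: string;           // 'dark' in every mock here
}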

packages/backend/src/workers/provider/LlamaCppPython.spec.ts

Lines changed: 9 additions & 0 deletions
@@ -97,6 +97,7 @@ beforeEach(() => {
   experimentalGPU: false,
   modelsPath: 'model-path',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -278,6 +279,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -321,6 +323,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -369,6 +372,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -417,6 +421,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -453,6 +458,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -498,6 +504,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -537,6 +544,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,
@@ -581,6 +589,7 @@ describe('perform', () => {
   experimentalGPU: true,
   modelsPath: '',
   apiPort: 10434,
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   modelUploadDisabled: false,
   showGPUPromotion: false,

packages/frontend/src/lib/notification/ContainerConnectionWrapper.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -68,6 +68,7 @@ beforeEach(() => {
   apiPort: 0,
   experimentalTuning: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   modelUploadDisabled: false,
   showGPUPromotion: false,
   appearance: 'dark',

packages/frontend/src/lib/notification/GPUPromotion.spec.ts

Lines changed: 6 additions & 0 deletions
@@ -45,6 +45,7 @@ const mockConfiguration: Writable<ExtensionConfiguration> = writable({
   experimentalGPU: false,
   modelsPath: '',
   apiPort: -1,
+  inferenceRuntime: 'llama-cpp',
   modelUploadDisabled: false,
   experimentalTuning: false,
   showGPUPromotion: false,
@@ -64,6 +65,7 @@ test('should show banner if gpu support if off and gpu promotion on', async () =
   showGPUPromotion: true,
   modelUploadDisabled: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   apiPort: -1,
   appearance: 'dark',
@@ -85,6 +87,7 @@ test('should not show banner if gpu support if on and gpu promotion on', async (
   showGPUPromotion: true,
   modelUploadDisabled: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   apiPort: -1,
   appearance: 'dark',
@@ -106,6 +109,7 @@ test('should not show banner if gpu support if off and gpu promotion off', async
   showGPUPromotion: false,
   modelUploadDisabled: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   apiPort: -1,
   appearance: 'dark',
@@ -128,6 +132,7 @@ test('click enable should call client', async () => {
   showGPUPromotion: true,
   modelUploadDisabled: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   apiPort: -1,
   appearance: 'dark',
@@ -155,6 +160,7 @@ test('click hide should call client', async () => {
   showGPUPromotion: true,
   modelUploadDisabled: false,
   modelsPath: '',
+  inferenceRuntime: 'llama-cpp',
   experimentalTuning: false,
   apiPort: -1,
   appearance: 'dark',

packages/frontend/src/lib/select/ModelSelect.spec.ts

Lines changed: 98 additions & 0 deletions
@@ -22,6 +22,27 @@ import { render, fireEvent, within } from '@testing-library/svelte';
 import ModelSelect from '/@/lib/select/ModelSelect.svelte';
 import type { ModelInfo } from '@shared/models/IModelInfo';
 import { InferenceType } from '@shared/models/IInference';
+import { writable, type Writable } from 'svelte/store';
+import type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';
+import { configuration } from '/@/stores/extensionConfiguration';
+
+vi.mock('../../stores/extensionConfiguration', () => ({
+  configuration: {
+    subscribe: vi.fn(),
+    unsubscribe: vi.fn(),
+  },
+}));
+
+const mockConfiguration: Writable<ExtensionConfiguration> = writable({
+  inferenceRuntime: 'llama-cpp',
+  experimentalGPU: false,
+  showGPUPromotion: false,
+  modelUploadDisabled: false,
+  modelsPath: '',
+  experimentalTuning: false,
+  apiPort: -1,
+  appearance: 'dark',
+});
 
 const fakeRecommendedModel: ModelInfo = {
   id: 'dummy-model-1',
@@ -45,9 +66,39 @@ const fakeRecommendedRemoteModel: ModelInfo = {
   name: 'Dummy Model 3',
 } as unknown as ModelInfo;
 
+const fakeRemoteModelWhisper: ModelInfo = {
+  id: 'dummy-model-4',
+  backend: InferenceType.WHISPER_CPP,
+  name: 'Dummy Model 4',
+} as unknown as ModelInfo;
+
+const fakeRemoteModelNone: ModelInfo = {
+  id: 'dummy-model-5',
+  backend: InferenceType.NONE,
+  name: 'Dummy Model 5',
+} as unknown as ModelInfo;
+
+vi.mock('/@/utils/client', async () => {
+  return {
+    studioClient: {
+      updateExtensionConfiguration: vi.fn(),
+      telemetryLogUsage: vi.fn(),
+    },
+  };
+});
+
+vi.mock('../../stores/extensionConfiguration', () => ({
+  configuration: {
+    subscribe: vi.fn(),
+    unsubscribe: vi.fn(),
+  },
+}));
+
 beforeEach(() => {
+  vi.resetAllMocks();
   // mock scrollIntoView
   window.HTMLElement.prototype.scrollIntoView = vi.fn();
+  vi.mocked(configuration).subscribe.mockImplementation(run => mockConfiguration.subscribe(run));
 });
 
 test('ModelSelect should list all models provided', async () => {
@@ -70,6 +121,26 @@ test('ModelSelect should list all models provided', async () => {
   expect(items[1]).toHaveTextContent(fakeRemoteModel.name);
 });
 
+test('ModelSelect should list all models based on selected runtime', async () => {
+  const { container } = render(ModelSelect, {
+    value: undefined,
+    disabled: undefined,
+    models: [fakeRecommendedModel, fakeRemoteModelWhisper, fakeRemoteModel, fakeRemoteModelNone],
+    recommended: [],
+  });
+
+  // first get the select input
+  const input = within(container).getByLabelText('Select Model');
+  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.
+
+  // get all options available
+  const items = container.querySelectorAll('div[class~="list-item"]');
+  // ensure we have two options
+  expect(items.length).toBe(2);
+  expect(items[0]).toHaveTextContent(fakeRecommendedModel.name);
+  expect(items[1]).toHaveTextContent(fakeRemoteModel.name);
+});
+
 test('ModelSelect should set star icon next to recommended model', async () => {
   const { container } = render(ModelSelect, {
     value: undefined,
@@ -110,3 +181,30 @@ test('models should be sorted', async () => {
   expect(items[1]).toHaveTextContent(fakeRecommendedRemoteModel.name);
   expect(items[2]).toHaveTextContent(fakeRemoteModel.name);
 });
+
+test('ModelSelect should filter out models based on selected default runtime', async () => {
+  const { container } = render(ModelSelect, {
+    value: undefined,
+    disabled: undefined,
+    models: [
+      fakeRecommendedModel,
+      fakeRemoteModel,
+      fakeRemoteModelNone,
+      fakeRemoteModelWhisper,
+      fakeRecommendedRemoteModel,
+    ],
+    recommended: [],
+  });
+
+  // first get the select input
+  const input = within(container).getByLabelText('Select Model');
+  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.
+
+  // get all options available
+  const items = container.querySelectorAll('div[class~="list-item"]');
+  // ensure we have three options
+  expect(items.length).toBe(3);
+  expect(items[0]).toHaveTextContent(fakeRecommendedModel.name);
+  expect(items[1]).toHaveTextContent(fakeRemoteModel.name);
+  expect(items[2]).toHaveTextContent(fakeRecommendedRemoteModel.name);
+});
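
The new tests replace the real configuration store with a vi.fn() subscribe wired to a local writable, so each test decides which runtime the component sees. A hedged sketch of how that same setup could drive a runtime switch; this test does not exist in the commit, and it assumes both that ModelSelect filters on model.backend matching the configured inferenceRuntime and that InferenceType.WHISPER_CPP serializes to 'whisper-cpp'.

// Hypothetical extra test built on the mockConfiguration writable declared above.
test('ModelSelect should only list whisper models when the runtime is whisper-cpp', async () => {
  // assumption: InferenceType.WHISPER_CPP === 'whisper-cpp'
  mockConfiguration.set({
    inferenceRuntime: 'whisper-cpp',
    experimentalGPU: false,
    showGPUPromotion: false,
    modelUploadDisabled: false,
    modelsPath: '',
    experimentalTuning: false,
    apiPort: -1,
    appearance: 'dark',
  });

  const { container } = render(ModelSelect, {
    value: undefined,
    disabled: undefined,
    models: [fakeRecommendedModel, fakeRemoteModelWhisper, fakeRemoteModelNone],
    recommended: [],
  });

  const input = within(container).getByLabelText('Select Model');
  await fireEvent.pointerUp(input);

  const items = container.querySelectorAll('div[class~="list-item"]');
  // only the whisper-backed model should survive the runtime filter
  expect(items.length).toBe(1);
  expect(items[0]).toHaveTextContent(fakeRemoteModelWhisper.name);
});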

packages/frontend/src/lib/select/ModelSelect.svelte

Lines changed: 5 additions & 2 deletions
@@ -50,8 +50,11 @@ function handleOnChange(nValue: (ModelInfo & { label: string; value: string }) |
 let defaultRuntime: string = 'llama-cpp';
 
 onMount(() => {
-  const inferenceRuntime = $configuration?.inferenceRuntime;
-  if (inferenceRuntime) defaultRuntime = inferenceRuntime;
+  return configuration.subscribe(values => {
+    if (values?.inferenceRuntime) {
+      defaultRuntime = values.inferenceRuntime;
+    }
+  });
 });
 </script>
 
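
The component change is small but meaningful: ModelSelect used to read $configuration?.inferenceRuntime once on mount, so a configuration value that arrived after mount was missed. It now subscribes to the store and keeps defaultRuntime in sync, and because onMount treats a returned function as its cleanup callback, returning the subscription's unsubscribe function tears it down when the component is destroyed. A minimal sketch of the same pattern in isolation, with generic names rather than the repository's own:

// Generic Svelte <script lang="ts"> sketch of the subscribe-in-onMount pattern used above.
import { onMount } from 'svelte';
import { writable } from 'svelte/store';

const settings = writable<{ runtime?: string }>({});

let runtime = 'llama-cpp';

onMount(() => {
  // the returned unsubscribe doubles as the onMount cleanup callback
  return settings.subscribe(value => {
    if (value?.runtime) {
      runtime = value.runtime;
    }
  });
});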

packages/frontend/src/pages/CreateService.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -117,6 +117,7 @@ beforeEach(() => {
   vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({
     experimentalGPU: false,
     apiPort: 0,
+    inferenceRuntime: 'llama-cpp',
     experimentalTuning: false,
     modelsPath: '',
     modelUploadDisabled: false,

packages/frontend/src/pages/NewInstructLabSession.spec.ts

Lines changed: 18 additions & 0 deletions
@@ -45,6 +45,14 @@ vi.mock('../utils/client', async () => ({
   studioClient: {
     openURL: vi.fn(),
     openDialog: vi.fn(),
+    getExtensionConfiguration: vi.fn(),
+  },
+  rpcBrowser: {
+    subscribe: (): unknown => {
+      return {
+        unsubscribe: (): void => {},
+      };
+    },
   },
 }));
 
@@ -53,6 +61,16 @@ beforeEach(() => {
 
   const infos: Writable<ModelInfo[]> = writable([]);
   vi.mocked(modelsInfo).subscribe.mockImplementation(run => infos.subscribe(run));
+  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({
+    experimentalGPU: false,
+    apiPort: 0,
+    experimentalTuning: false,
+    modelsPath: '',
+    inferenceRuntime: 'llama-cpp',
+    modelUploadDisabled: false,
+    showGPUPromotion: false,
+    appearance: 'dark',
+  });
 });
 
 test('empty form should have submit disabled', async () => {

packages/frontend/src/pages/PlaygroundCreate.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -59,6 +59,7 @@ vi.mock('../utils/client', async () => {
   return {
     studioClient: {
       requestCreatePlayground: vi.fn(),
+      getExtensionConfiguration: vi.fn().mockResolvedValue({}),
     },
     rpcBrowser: {
       subscribe: (): unknown => {

packages/frontend/src/pages/Recipes.spec.ts

Lines changed: 18 additions & 0 deletions
@@ -34,6 +34,14 @@ vi.mock('/@/stores/catalog', async () => {
 vi.mock('../utils/client', async () => ({
   studioClient: {
     filterRecipes: vi.fn(),
+    getExtensionConfiguration: vi.fn().mockResolvedValue({}),
+  },
+  rpcBrowser: {
+    subscribe: (): unknown => {
+      return {
+        unsubscribe: (): void => {},
+      };
+    },
   },
 }));
 
@@ -104,6 +112,16 @@ beforeEach(() => {
     filters: {},
     choices: {},
   });
+  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({
+    experimentalGPU: false,
+    apiPort: 0,
+    experimentalTuning: false,
+    modelsPath: '',
+    inferenceRuntime: 'llama-cpp',
+    modelUploadDisabled: false,
+    showGPUPromotion: false,
+    appearance: 'dark',
+  });
 });
 
 test('recipe without category should be visible', async () => {
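
Recipes.spec.ts and NewInstructLabSession.spec.ts gain the same pair of mocks: getExtensionConfiguration on studioClient and a no-op rpcBrowser.subscribe. That suggests these pages now read the extension configuration when they load; a hedged sketch of what that consumption might look like on the page side (the actual page code is not part of this commit, so treat the wiring below as illustrative):

// Hypothetical page-side usage implied by the new mocks; the real components may differ.
import { onMount } from 'svelte';
import { studioClient } from '../utils/client';
import type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';

let config: ExtensionConfiguration | undefined;

onMount(() => {
  studioClient
    .getExtensionConfiguration()
    .then(value => {
      config = value;
    })
    .catch((err: unknown) => {
      console.error('failed to get extension configuration', err);
    });
});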
