@@ -1,6 +1,6 @@
 import axios from "axios"
 import { vi, describe, it, expect, beforeEach } from "vitest"
-import { LMStudioClient, LLMInfo } from "@lmstudio/sdk" // LLMInfo is a type
+import { LMStudioClient, LLMInfo, LLMInstanceInfo } from "@lmstudio/sdk" // LLMInfo is a type
 import { getLMStudioModels, parseLMStudioModel } from "../lmstudio"
 import { ModelInfo, lMStudioDefaultModelInfo } from "@roo-code/types" // ModelInfo is a type
 
@@ -29,28 +29,30 @@ describe("LMStudio Fetcher", () => {
 
 	describe("parseLMStudioModel", () => {
 		it("should correctly parse raw LLMInfo to ModelInfo", () => {
-			const rawModel: LLMInfo = {
-				architecture: "llama",
-				modelKey: "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
-				path: "/Users/username/.cache/lm-studio/models/Mistral AI/Mistral-7B-Instruct-v0.2/mistral-7b-instruct-v0.2.Q4_K_M.gguf",
+			const rawModel: LLMInstanceInfo = {
 				type: "llm",
-				displayName: "Mistral-7B-Instruct-v0.2-Q4_K_M",
-				maxContextLength: 8192,
-				paramsString: "7B params, 8k context",
+				modelKey: "mistralai/devstral-small-2505",
+				format: "safetensors",
+				displayName: "Devstral Small 2505",
+				path: "mistralai/devstral-small-2505",
+				sizeBytes: 13277565112,
+				architecture: "mistral",
+				identifier: "mistralai/devstral-small-2505",
+				instanceReference: "RAP5qbeHVjJgBiGFQ6STCuTJ",
 				vision: false,
-				format: "gguf",
-				sizeBytes: 4080000000,
-				trainedForToolUse: false, // Added
+				trainedForToolUse: false,
+				maxContextLength: 131072,
+				contextLength: 7161,
 			}
 
 			const expectedModelInfo: ModelInfo = {
 				...lMStudioDefaultModelInfo,
 				description: `${rawModel.displayName} - ${rawModel.paramsString} - ${rawModel.path}`,
-				contextWindow: rawModel.maxContextLength,
+				contextWindow: rawModel.contextLength,
 				supportsPromptCache: true,
 				supportsImages: rawModel.vision,
 				supportsComputerUse: false,
-				maxTokens: rawModel.maxContextLength,
+				maxTokens: rawModel.contextLength,
 				inputPrice: 0,
 				outputPrice: 0,
 				cacheWritesPrice: 0,
@@ -66,13 +68,16 @@ describe("LMStudio Fetcher", () => {
 		const baseUrl = "http://localhost:1234"
 		const lmsUrl = "ws://localhost:1234"
 
-		const mockRawModel: LLMInfo = {
+		const mockRawModel: LLMInstanceInfo = {
 			architecture: "test-arch",
+			identifier: "mistralai/devstral-small-2505",
+			instanceReference: "RAP5qbeHVjJgBiGFQ6STCuTJ",
 			modelKey: "test-model-key-1",
 			path: "/path/to/test-model-1",
 			type: "llm",
 			displayName: "Test Model One",
 			maxContextLength: 2048,
+			contextLength: 7161,
 			paramsString: "1B params, 2k context",
 			vision: true,
 			format: "gguf",
@@ -81,7 +86,7 @@ describe("LMStudio Fetcher", () => {
 		}
 
 		it("should fetch and parse models successfully", async () => {
-			const mockApiResponse: LLMInfo[] = [mockRawModel]
+			const mockApiResponse: LLMInstanceInfo[] = [mockRawModel]
 			mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
 			mockListDownloadedModels.mockResolvedValueOnce(mockApiResponse)
 