Commit 7fccf15

Run prettier on files

1 parent ee4753a · commit 7fccf15

File tree

3 files changed: +102 −100 lines changed

.prettierignore
src/api/providers/lmstudio.ts
src/integrations/editor/DiffViewProvider.ts

.prettierignore

Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
+dist
+build
+out
+.next
+.venv
+pnpm-lock.yaml
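
For context on how these entries take effect: prettier skips any path matching a pattern in .prettierignore, both from the CLI and the Node API. Below is a minimal sketch of checking that behavior programmatically — illustrative only, not code from this commit; it assumes prettier v3, and the helper name shouldFormat is hypothetical:

	// Illustrative sketch, not part of this commit. Assumes prettier v3.
	import * as prettier from "prettier"

	// Hypothetical helper: returns true when prettier would format the file,
	// i.e. it is not matched by .prettierignore and a parser can be inferred.
	async function shouldFormat(filePath: string): Promise<boolean> {
		const info = await prettier.getFileInfo(filePath, { ignorePath: ".prettierignore" })
		return !info.ignored && info.inferredParser !== null
	}

With the ignore file above, shouldFormat("dist/index.js") would resolve to false, while shouldFormat("src/api/providers/lmstudio.ts") would resolve to true.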

src/api/providers/lmstudio.ts

Lines changed: 78 additions & 83 deletions

@@ -25,108 +25,103 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler
 	}
 
 	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
-
-		// -------------------------
-		// Track token usage
-		// -------------------------
-		const toContentBlocks = (
-			blocks: Anthropic.Messages.MessageParam[] | string,
-		): Anthropic.Messages.ContentBlockParam[] => {
-			if (typeof blocks === "string") {
-				return [{ type: "text", text: blocks }]
-			}
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		// -------------------------
+		// Track token usage
+		// -------------------------
+		const toContentBlocks = (
+			blocks: Anthropic.Messages.MessageParam[] | string,
+		): Anthropic.Messages.ContentBlockParam[] => {
+			if (typeof blocks === "string") {
+				return [{ type: "text", text: blocks }]
+			}
 
-			const result: Anthropic.Messages.ContentBlockParam[] = []
-			for (const msg of blocks) {
-				if (typeof msg.content === "string") {
-					result.push({ type: "text", text: msg.content })
-				} else if (Array.isArray(msg.content)) {
-					for (const part of msg.content) {
-						if (part.type === "text") {
-							result.push({ type: "text", text: part.text })
+			const result: Anthropic.Messages.ContentBlockParam[] = []
+			for (const msg of blocks) {
+				if (typeof msg.content === "string") {
+					result.push({ type: "text", text: msg.content })
+				} else if (Array.isArray(msg.content)) {
+					for (const part of msg.content) {
+						if (part.type === "text") {
+							result.push({ type: "text", text: part.text })
+						}
 					}
 				}
 			}
+			return result
 		}
-			return result
-		}
 
-		let inputTokens = 0
-		try {
-			inputTokens = await this.countTokens([
-				{ type: "text", text: systemPrompt },
-				...toContentBlocks(messages),
-			])
-		} catch (err) {
-			console.error("[LmStudio] Failed to count input tokens:", err)
-			inputTokens = 0
-		}
+		let inputTokens = 0
+		try {
+			inputTokens = await this.countTokens([{ type: "text", text: systemPrompt }, ...toContentBlocks(messages)])
+		} catch (err) {
+			console.error("[LmStudio] Failed to count input tokens:", err)
+			inputTokens = 0
+		}
 
-		let assistantText = ""
+		let assistantText = ""
 
-		try {
-			const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
-				model: this.getModel().id,
-				messages: openAiMessages,
-				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
-				stream: true,
-			}
+		try {
+			const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
+				stream: true,
+			}
 
-			if (this.options.lmStudioSpeculativeDecodingEnabled && this.options.lmStudioDraftModelId) {
-				params.draft_model = this.options.lmStudioDraftModelId
-			}
+			if (this.options.lmStudioSpeculativeDecodingEnabled && this.options.lmStudioDraftModelId) {
+				params.draft_model = this.options.lmStudioDraftModelId
+			}
 
-			const results = await this.client.chat.completions.create(params)
+			const results = await this.client.chat.completions.create(params)
 
-			const matcher = new XmlMatcher(
-				"think",
-				(chunk) =>
-					({
-						type: chunk.matched ? "reasoning" : "text",
-						text: chunk.data,
-					}) as const,
-			)
+			const matcher = new XmlMatcher(
+				"think",
+				(chunk) =>
+					({
+						type: chunk.matched ? "reasoning" : "text",
+						text: chunk.data,
+					}) as const,
+			)
 
-			for await (const chunk of results) {
-				const delta = chunk.choices[0]?.delta
+			for await (const chunk of results) {
+				const delta = chunk.choices[0]?.delta
 
-				if (delta?.content) {
-					assistantText += delta.content
-					for (const processedChunk of matcher.update(delta.content)) {
-						yield processedChunk
+				if (delta?.content) {
+					assistantText += delta.content
+					for (const processedChunk of matcher.update(delta.content)) {
+						yield processedChunk
+					}
 				}
 			}
-			}
 
-			for (const processedChunk of matcher.final()) {
-				yield processedChunk
-			}
+			for (const processedChunk of matcher.final()) {
+				yield processedChunk
+			}
 
-
-			let outputTokens = 0
-			try {
-				outputTokens = await this.countTokens([{ type: "text", text: assistantText }])
-			} catch (err) {
-				console.error("[LmStudio] Failed to count output tokens:", err)
-				outputTokens = 0
-			}
+			let outputTokens = 0
+			try {
+				outputTokens = await this.countTokens([{ type: "text", text: assistantText }])
+			} catch (err) {
+				console.error("[LmStudio] Failed to count output tokens:", err)
+				outputTokens = 0
+			}
 
-			yield {
-				type: "usage",
-				inputTokens,
-				outputTokens,
-			} as const
-		} catch (error) {
-			throw new Error(
-				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
-			)
+			yield {
+				type: "usage",
+				inputTokens,
+				outputTokens,
+			} as const
+		} catch (error) {
+			throw new Error(
+				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
+			)
+		}
 	}
-	}
-
 
 	override getModel(): { id: string; info: ModelInfo } {
 		return {
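
Most of this hunk is a whitespace-only re-indent; the logic is unchanged. For readers skimming the reflowed code: the XmlMatcher constructed above routes streamed model output inside <think>…</think> tags to "reasoning" chunks and everything else to "text" chunks. A toy approximation of that contract — based only on the call sites visible here, not the repo's actual incremental XmlMatcher; it handles a complete string rather than a stream:

	// Toy sketch, not the repo's XmlMatcher: tag <think> spans as "reasoning".
	type Tagged = { type: "reasoning" | "text"; text: string }

	function splitThinkTags(input: string): Tagged[] {
		const out: Tagged[] = []
		const re = /<think>([\s\S]*?)<\/think>/g
		let last = 0
		let m: RegExpExecArray | null
		while ((m = re.exec(input)) !== null) {
			// Text before the tag is plain output.
			if (m.index > last) out.push({ type: "text", text: input.slice(last, m.index) })
			// The tag body is reasoning.
			out.push({ type: "reasoning", text: m[1] })
			last = re.lastIndex
		}
		if (last < input.length) out.push({ type: "text", text: input.slice(last) })
		return out
	}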

src/integrations/editor/DiffViewProvider.ts

Lines changed: 18 additions & 17 deletions

@@ -303,22 +303,23 @@ export class DiffViewProvider {
 
 	private async closeAllDiffViews(): Promise<void> {
 		const closeOps = vscode.window.tabGroups.all
-			.flatMap(group => group.tabs)
-			.filter(
-				tab =>
-					tab.input instanceof vscode.TabInputTextDiff &&
-					tab.input.original.scheme === DIFF_VIEW_URI_SCHEME &&
-					!tab.isDirty
-			)
-			.map(tab =>
-				vscode.window.tabGroups.close(tab).then(
-					() => undefined,
-					err => {
-						console.error(`Failed to close diff tab ${tab.label}`, err);
-					}
-				));
-
-		await Promise.all(closeOps);
+			.flatMap((group) => group.tabs)
+			.filter(
+				(tab) =>
+					tab.input instanceof vscode.TabInputTextDiff &&
+					tab.input.original.scheme === DIFF_VIEW_URI_SCHEME &&
+					!tab.isDirty,
+			)
+			.map((tab) =>
+				vscode.window.tabGroups.close(tab).then(
+					() => undefined,
+					(err) => {
+						console.error(`Failed to close diff tab ${tab.label}`, err)
+					},
+				),
+			)
+
+		await Promise.all(closeOps)
 	}
 
 	private async openDiffEditor(): Promise<vscode.TextEditor> {

@@ -425,7 +426,7 @@ export class DiffViewProvider {
 		return result
 	}
 
-	async reset() : Promise<void> {
+	async reset(): Promise<void> {
 		await this.closeAllDiffViews()
 		this.editType = undefined
 		this.isEditing = false
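
Beyond the arrow-paren and semicolon changes, the reflow makes the error-handling shape of closeAllDiffViews easier to see: each close promise gets its own rejection handler, so Promise.all can never short-circuit on one failed tab. The same pattern in generic form — a sketch with illustrative names, not code from the repo:

	// Sketch of the pattern above (hypothetical names): settle every operation
	// and log failures individually, so one rejection cannot abort the rest.
	async function closeAllQuietly<T>(items: T[], close: (item: T) => PromiseLike<unknown>): Promise<void> {
		await Promise.all(
			items.map((item) =>
				Promise.resolve(close(item)).then(
					() => undefined,
					(err) => {
						console.error("Failed to close item:", err)
					},
				),
			),
		)
	}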
