Commit 8a1f36d

refactor: remove console.logs
1 parent 0fe4cf4 · commit 8a1f36d

8 files changed: +1 −58 lines

src/services/continuedev/core/autocomplete/CompletionProvider.ts

Lines changed: 0 additions & 6 deletions

@@ -75,7 +75,6 @@ export class CompletionProvider {
       llm.useLegacyCompletionsEndpoint = true
     }

-    console.log("using LLM", llm)
     return llm
   }

@@ -137,7 +136,6 @@ export class CompletionProvider {
     // Create abort signal if not given
     if (!token) {
       const controller = this.loggingService.createAbortController(input.completionId)
-      console.log("creating abort token because none given")
       token = controller.signal
     }
     const startTime = Date.now()
@@ -210,16 +208,12 @@
     for await (const update of completionStream) {
       completion += update
     }
-    console.log("completion here", completion)

     // Don't postprocess if aborted
     if (token.aborted) {
-      console.log("aborted")
       return undefined
     }

-    console.log("raw completion", completion)
-
     const processedCompletion = helper.options.transform
       ? postprocessCompletion({
           completion,
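
The second hunk above sits in the cancellation fallback path: when the caller supplies no AbortSignal, the provider mints its own controller keyed by completion id. A minimal sketch of that pattern, with the service interface assumed from the identifiers visible in the diff (not copied from the source):

// Sketch only: the registry interface and input shape are assumptions
// inferred from the names in the diff.
interface AbortControllerRegistry {
  createAbortController(completionId: string): AbortController
}

function resolveAbortSignal(
  registry: AbortControllerRegistry,
  input: { completionId: string },
  token?: AbortSignal,
): AbortSignal {
  if (!token) {
    // No caller-supplied signal: create one that can be aborted by id later.
    const controller = registry.createAbortController(input.completionId)
    token = controller.signal
  }
  return token
}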

src/services/continuedev/core/autocomplete/generation/CompletionStreamer.ts

Lines changed: 0 additions & 1 deletion

@@ -30,7 +30,6 @@ export class CompletionStreamer {
     const generator = this.generatorReuseManager.getGenerator(
       prefix,
       (abortSignal: AbortSignal) => {
-        console.log("SUPPORT FIM", llm.supportsFim())
         const generator = llm.supportsFim()
           ? llm.streamFim(prefix, suffix, abortSignal, completionOptions)
           : llm.streamComplete(prompt, abortSignal, {
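
The deleted log sat at the heart of the streamer's dispatch: models that support fill-in-the-middle get streamFim(prefix, suffix, …), everything else falls back to streamComplete(prompt, …). A rough sketch of that branch, with the LLM surface assumed from the call sites above rather than the codebase's actual ILLM type:

// Sketch only: interface inferred from the call sites in the diff.
interface FimCapableLLM {
  supportsFim(): boolean
  streamFim(prefix: string, suffix: string, signal: AbortSignal): AsyncGenerator<string>
  streamComplete(prompt: string, signal: AbortSignal): AsyncGenerator<string>
}

function openCompletionStream(
  llm: FimCapableLLM,
  prefix: string,
  suffix: string,
  prompt: string,
  signal: AbortSignal,
): AsyncGenerator<string> {
  // FIM models take the surrounding context directly; others get a
  // pre-rendered prompt that already encodes prefix and suffix.
  return llm.supportsFim() ? llm.streamFim(prefix, suffix, signal) : llm.streamComplete(prompt, signal)
}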

src/services/continuedev/core/autocomplete/templating/index.ts

Lines changed: 0 additions & 10 deletions

@@ -199,16 +199,6 @@ export function renderPromptWithTokenLimit({

   const stopTokens = getStopTokens(completionOptions, helper.lang, helper.modelName)

-  console.log("HEREEEE", {
-    prompt,
-    compiledPrefix,
-    compiledSuffix,
-    completionOptions: {
-      ...completionOptions,
-      stop: stopTokens,
-    },
-  })
-
   return {
     prompt,
     prefix: compiledPrefix,
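
The deleted debug object doubles as documentation of what renderPromptWithTokenLimit returns: the rendered prompt, the compiled prefix and suffix, and completionOptions with the computed stop tokens merged in. A sketch of that return shape (field names come from the diff; the types are assumptions):

// Sketch only: types are assumed; field names come from the diff.
interface RenderedPrompt {
  prompt: string
  prefix: string
  suffix: string
  completionOptions: { stop: string[]; [key: string]: unknown }
}

function buildResult(
  prompt: string,
  compiledPrefix: string,
  compiledSuffix: string,
  completionOptions: Record<string, unknown>,
  stopTokens: string[],
): RenderedPrompt {
  return {
    prompt,
    prefix: compiledPrefix,
    suffix: compiledSuffix,
    // Stop tokens computed by getStopTokens override any caller-supplied list.
    completionOptions: { ...completionOptions, stop: stopTokens },
  }
}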

src/services/continuedev/core/index.d.ts

Lines changed: 1 addition & 2 deletions

@@ -620,7 +620,7 @@ export class SignatureInformation {
    * The label of this signature. Will be shown in
    * the UI.
    */
-  label: strin
+  label: string
   /**
    * The parameters of this signature.
    */
@@ -632,7 +632,6 @@ export class SignatureInformation {
    * If provided, this is used in place of {@linkcode SignatureHelp.activeParameter}.
    */
   activeParameter?: number
-  g
 }

 export type ConfigMergeType = "merge" | "overwrite"

src/services/continuedev/core/llm/llms/KiloCode.ts

Lines changed: 0 additions & 21 deletions

@@ -120,27 +120,6 @@ class KiloCode extends OpenRouter {

     const endpoint = new URL("fim/completions", this.apiFIMBase)

-    console.log("endpoint", endpoint)
-    console.log("HEADER", {
-      "Content-Type": "application/json",
-      Accept: "application/json",
-      "x-api-key": this.apiKey ?? "",
-      Authorization: `Bearer ${this.apiKey}`,
-    })
-    console.log("BODYYYY", {
-      model: options.model,
-      prompt: prefix,
-      suffix,
-      max_tokens: options.maxTokens,
-      temperature: options.temperature,
-      top_p: options.topP,
-      frequency_penalty: options.frequencyPenalty,
-      presence_penalty: options.presencePenalty,
-      stop: options.stop,
-      stream: true,
-      ...this.extraBodyProperties(),
-    })
-
     try {
       const resp = await fetch(endpoint, {
         method: "POST",

src/services/continuedev/core/llm/llms/OpenAI.ts

Lines changed: 0 additions & 11 deletions

@@ -169,12 +169,6 @@ export class OpenAI extends BaseLLM {
     args.prompt = prompt
     args.messages = undefined

-    console.log("Legacy Body", {
-      ...args,
-      stream: true,
-      ...this.extraBodyProperties(),
-    })
-
     const response = await fetch(this._getEndpoint("completions"), {
       method: "POST",
       headers: this._getHeaders(),
@@ -218,11 +212,6 @@ export class OpenAI extends BaseLLM {

     const body = this._convertArgs(options, messages)

-    console.log("BODYYYYY", {
-      ...body,
-      ...this.extraBodyProperties(),
-    })
-
     const response = await fetch(this._getEndpoint("chat/completions"), {
       method: "POST",
       headers: this._getHeaders(),
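
If request tracing like this is ever wanted again, a common alternative to scattering bare console.log calls is a small helper gated behind an environment flag, so the logging can be switched off (or deleted) in one place instead of eight files. A minimal sketch; the DEBUG_LLM flag name is hypothetical and not part of this codebase:

// Sketch only: DEBUG_LLM is a hypothetical flag, not an existing one.
const LLM_DEBUG = process.env.DEBUG_LLM === "1"

function debugLog(label: string, payload: unknown): void {
  if (LLM_DEBUG) {
    console.log(label, JSON.stringify(payload, null, 2))
  }
}

// Usage at a call site like the one above:
// debugLog("chat body", { ...body, ...this.extraBodyProperties() })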

src/services/continuedev/core/llm/llms/OpenRouter.ts

Lines changed: 0 additions & 2 deletions

@@ -75,8 +75,6 @@ class OpenRouter extends OpenAI {
     // First apply parent modifications
     body = super.modifyChatBody(body)

-    console.log("BODY", body)
-
     // Check if we should apply Anthropic caching
     if (!this.isAnthropicModel(body.model) || (!this.cacheBehavior && !this.completionOptions.promptCaching)) {
       return body
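
The surviving guard spells out the caching policy: modifyChatBody first defers to the parent class, then bails out unless the model is an Anthropic one and caching is switched on via cacheBehavior or promptCaching. The same predicate restated positively, as a sketch (simplified; the cache-control transform applied after the guard is not shown):

// Sketch only: a positive restatement of the guard in the diff.
function shouldApplyAnthropicCaching(
  isAnthropicModel: boolean,
  cacheBehavior: unknown,
  promptCaching: boolean | undefined,
): boolean {
  // Equivalent to the negated early-return condition above.
  return isAnthropicModel && (Boolean(cacheBehavior) || Boolean(promptCaching))
}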

src/services/continuedev/core/llm/openai-adapters/apis/OpenAI.ts

Lines changed: 0 additions & 5 deletions

@@ -78,7 +78,6 @@ export class OpenAIApi implements BaseLlmApi {
     body: ChatCompletionCreateParamsNonStreaming,
     signal: AbortSignal,
   ): Promise<ChatCompletion> {
-    console.log("chatCompletionNonStream", body)
     const response = await this.openai.chat.completions.create(this.modifyChatBody(body), {
       signal,
     })
@@ -89,7 +88,6 @@ export class OpenAIApi implements BaseLlmApi {
     body: ChatCompletionCreateParamsStreaming,
     signal: AbortSignal,
   ): AsyncGenerator<ChatCompletionChunk, any, unknown> {
-    console.log("chatCompletionStream", body)
     const response = await this.openai.chat.completions.create(this.modifyChatBody(body), {
       signal,
     })
@@ -98,15 +96,13 @@ export class OpenAIApi implements BaseLlmApi {
     }
   }
   async completionNonStream(body: CompletionCreateParamsNonStreaming, signal: AbortSignal): Promise<Completion> {
-    console.log("completionNonStream", body)
     const response = await this.openai.completions.create(this.modifyCompletionBody(body), { signal })
     return response
   }
   async *completionStream(
     body: CompletionCreateParamsStreaming,
     signal: AbortSignal,
   ): AsyncGenerator<Completion, any, unknown> {
-    console.log("completionStream", body)
     const response = await this.openai.completions.create(this.modifyCompletionBody(body), { signal })
     for await (const result of response) {
       yield result
@@ -116,7 +112,6 @@ export class OpenAIApi implements BaseLlmApi {
     body: FimCreateParamsStreaming,
     signal: AbortSignal,
   ): AsyncGenerator<ChatCompletionChunk, any, unknown> {
-    console.log("fimStream", body)
     const endpoint = new URL("fim/completions", this.apiBase)
     const modifiedBody = this.modifyFimBody(body)
     const resp = await fetch(endpoint, {
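
Every method in this adapter follows the same contract: forward the (possibly modified) body to the SDK or a raw fetch, honor the AbortSignal, and surface streams as async generators. Consuming one is a plain for await loop; a short usage sketch, with the api parameter narrowed to just the one method the example needs:

// Sketch only: the api shape below is an illustrative narrowing.
async function collectCompletion(api: {
  completionStream(
    body: { model: string; prompt: string; stream: true },
    signal: AbortSignal,
  ): AsyncGenerator<{ choices: Array<{ text?: string }> }>
}): Promise<string> {
  const controller = new AbortController()
  let text = ""
  const body = { model: "my-model", prompt: "function add(", stream: true as const }
  for await (const chunk of api.completionStream(body, controller.signal)) {
    text += chunk.choices[0]?.text ?? ""
  }
  return text
}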
