
Commit a0e7bf6

Fix command output not being streamed when auto-approved + model ID under chat field (RooCodeInc#1449)
* Fix bug where auto-approving commands would not stream output back to webview
* Fix model id under chat field
* Amend
1 parent 54ef05f · commit a0e7bf6

File tree

2 files changed: +11 additions, −36 deletions


src/shared/combineCommandSequences.ts

Lines changed: 3 additions & 3 deletions
@@ -25,13 +25,13 @@ export function combineCommandSequences(messages: ClineMessage[]): ClineMessage[
 
 	// First pass: combine commands with their outputs
 	for (let i = 0; i < messages.length; i++) {
-		if (messages[i].type === "ask" && (messages[i].ask === "command" || messages[i].say === "command")) {
+		if (messages[i].ask === "command" || messages[i].say === "command") {
 			let combinedText = messages[i].text || ""
 			let didAddOutput = false
 			let j = i + 1
 
 			while (j < messages.length) {
-				if (messages[j].type === "ask" && (messages[j].ask === "command" || messages[j].say === "command")) {
+				if (messages[j].ask === "command" || messages[j].say === "command") {
 					// Stop if we encounter the next command
 					break
 				}
@@ -63,7 +63,7 @@ export function combineCommandSequences(messages: ClineMessage[]): ClineMessage[
 	return messages
 		.filter((msg) => !(msg.ask === "command_output" || msg.say === "command_output"))
 		.map((msg) => {
-			if (msg.type === "ask" && (msg.ask === "command" || msg.say === "command")) {
+			if (msg.ask === "command" || msg.say === "command") {
				const combinedCommand = combinedCommands.find((cmd) => cmd.ts === msg.ts)
				return combinedCommand || msg
			}
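
For context, the root cause appears to be that auto-approved commands reach the webview as say: "command" messages rather than ask: "command", so the old type === "ask" guard skipped them and their command_output chunks were never folded into the command message. A minimal sketch of the two checks, using a simplified, hypothetical message shape rather than the real ClineMessage type:

// Simplified, hypothetical message shape; the real ClineMessage has more fields.
type SimpleMessage = {
	ts: number
	type: "ask" | "say"
	ask?: string
	say?: string
	text?: string
}

// An auto-approved command is recorded as a "say" message, not an "ask".
const autoApproved: SimpleMessage = { ts: 1, type: "say", say: "command", text: "npm test" }

// Old guard: requires type === "ask", so the auto-approved command never matches
// and its output is never combined into the message streamed to the webview.
const oldMatch =
	autoApproved.type === "ask" && (autoApproved.ask === "command" || autoApproved.say === "command") // false

// New guard: matches both asked and auto-approved commands.
const newMatch = autoApproved.ask === "command" || autoApproved.say === "command" // true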

webview-ui/src/components/chat/ChatTextArea.tsx

Lines changed: 8 additions & 33 deletions
@@ -3,17 +3,8 @@ import React, { forwardRef, useCallback, useEffect, useLayoutEffect, useMemo, us
 import DynamicTextArea from "react-textarea-autosize"
 import { useClickAway, useWindowSize } from "react-use"
 import styled from "styled-components"
-import {
-	anthropicDefaultModelId,
-	bedrockDefaultModelId,
-	deepSeekDefaultModelId,
-	geminiDefaultModelId,
-	mistralDefaultModelId,
-	openAiNativeDefaultModelId,
-	openRouterDefaultModelId,
-	vertexDefaultModelId,
-} from "../../../../src/shared/api"
 import { mentionRegex, mentionRegexGlobal } from "../../../../src/shared/context-mentions"
+import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
 import { useExtensionState } from "../../context/ExtensionStateContext"
 import {
 	ContextMenuOptionType,
@@ -26,7 +17,7 @@ import { validateApiConfiguration, validateModelId } from "../../utils/validate"
 import { vscode } from "../../utils/vscode"
 import { CODE_BLOCK_BG_COLOR } from "../common/CodeBlock"
 import Thumbnails from "../common/Thumbnails"
-import ApiOptions from "../settings/ApiOptions"
+import ApiOptions, { normalizeApiConfiguration } from "../settings/ApiOptions"
 import { MAX_IMAGES_PER_MESSAGE } from "./ChatView"
 import ContextMenu from "./ContextMenu"
 

@@ -686,35 +677,19 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
 
 		// Get model display name
 		const modelDisplayName = useMemo(() => {
+			const { selectedProvider, selectedModelId } = normalizeApiConfiguration(apiConfiguration)
 			const unknownModel = "unknown"
 			if (!apiConfiguration) return unknownModel
-			switch (apiConfiguration.apiProvider) {
+			switch (selectedProvider) {
 				case "anthropic":
-					return `anthropic:${apiConfiguration.apiModelId || anthropicDefaultModelId}`
-				case "openai":
-					return `openai:${apiConfiguration.openAiModelId || unknownModel}`
 				case "openrouter":
-					return `openrouter:${apiConfiguration.openRouterModelId || openRouterDefaultModelId}`
-				case "bedrock":
-					return `bedrock:${apiConfiguration.apiModelId || bedrockDefaultModelId}`
-				case "vertex":
-					return `vertex:${apiConfiguration.apiModelId || vertexDefaultModelId}`
-				case "ollama":
-					return `ollama:${apiConfiguration.ollamaModelId || unknownModel}`
-				case "lmstudio":
-					return `lmstudio:${apiConfiguration.lmStudioModelId || unknownModel}`
-				case "gemini":
-					return `gemini:${apiConfiguration.apiModelId || geminiDefaultModelId}`
-				case "openai-native":
-					return `openai-native:${apiConfiguration.apiModelId || openAiNativeDefaultModelId}`
-				case "deepseek":
-					return `deepseek:${apiConfiguration.apiModelId || deepSeekDefaultModelId}`
-				case "mistral":
-					return `mistral:${apiConfiguration.apiModelId || mistralDefaultModelId}`
+					return `${selectedProvider}:${selectedModelId}`
+				case "openai":
+					return `openai-compat:${selectedModelId}`
 				case "vscode-lm":
 					return `vscode-lm:${apiConfiguration.vsCodeLmModelSelector ? `${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` : unknownModel}`
 				default:
-					return unknownModel
+					return `${selectedProvider}:${selectedModelId}`
 			}
 		}, [apiConfiguration])
 
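
The helper itself is not shown in this diff; roughly, normalizeApiConfiguration resolves the selected provider and falls back to that provider's default model ID, which is why ChatTextArea no longer needs to import every per-provider default. A hedged sketch of what such a helper could look like (the real implementation in ApiOptions.tsx covers more providers and fields; the ApiConfiguration type and the import path are assumed from the imports removed above):

import {
	ApiConfiguration,
	anthropicDefaultModelId,
	openRouterDefaultModelId,
} from "../../../../src/shared/api"

// Sketch only: centralize "which provider/model is actually selected" so every
// consumer (settings panel, chat field) derives the same provider/model pair.
export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
	const selectedProvider = apiConfiguration?.apiProvider ?? "anthropic"
	switch (selectedProvider) {
		case "openrouter":
			return {
				selectedProvider,
				selectedModelId: apiConfiguration?.openRouterModelId ?? openRouterDefaultModelId,
			}
		case "ollama":
			return { selectedProvider, selectedModelId: apiConfiguration?.ollamaModelId ?? "unknown" }
		default:
			return {
				selectedProvider,
				selectedModelId: apiConfiguration?.apiModelId ?? anthropicDefaultModelId,
			}
	}
}

With a helper of this shape in place, the chat field's label reduces to `${selectedProvider}:${selectedModelId}` for most providers, as in the hunk above, with only the openai and vscode-lm cases formatted specially.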
