
Commit 8a4afec

feat(tools): add gh_issue (#974)
Addresses #873. Adds the gh_issue tool, which lets Q open pre-filled issue templates with additional information. As with q issue, when Q is asked to report an issue, a browser opens to the template (or the URL is printed if a browser cannot be opened).

Features:
- Triggers when Q is asked to create or report an issue, or to open a feature request.
- Adds additional chat information automatically: Q can fill in the expected/actual behavior, the steps to reproduce, and the title.
- Includes any request IDs that resulted in an error.
- Includes the chat context (from /context show --expand), along with the context files' sizes.
- Includes the last 5 user-assistant exchanges from the chat transcript, including any tool uses or errors that were displayed.
Parent: 7307b53 · commit 8a4afec
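GitHub's new-issue form can be pre-filled through query parameters, which is how a template URL can carry the chat details described above. A minimal sketch of that mechanism, assuming a hypothetical build_issue_url helper and an illustrative owner/repo slug (neither is taken from this commit), using only the standard library:

/// Hypothetical helper, not the commit's actual code: GitHub pre-fills the
/// new-issue form from the `title` and `body` query parameters.
fn build_issue_url(repo: &str, title: &str, body: &str) -> String {
    // Percent-encode everything outside the RFC 3986 unreserved set.
    fn encode(s: &str) -> String {
        s.bytes()
            .map(|b| match b {
                b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                    (b as char).to_string()
                },
                _ => format!("%{b:02X}"),
            })
            .collect()
    }
    format!(
        "https://github.com/{repo}/issues/new?title={}&body={}",
        encode(title),
        encode(body)
    )
}

fn main() {
    // If the URL cannot be opened in a browser, it is printed instead.
    let url = build_issue_url(
        "owner/repo",
        "Stream timeout in chat",
        "Expected: a response\nActual: timeout\n\n[chat transcript here]",
    );
    println!("{url}");
}

Percent-encoding the body matters here because chat transcripts routinely contain newlines and characters like '&' that would otherwise break the query string.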

6 files changed (+416, -101 lines)


crates/q_cli/src/cli/chat/conversation_state.rs

Lines changed: 29 additions & 0 deletions
@@ -59,6 +59,11 @@ pub struct ConversationState {
     /// calling [Self::as_sendable_conversation_state].
     pub next_message: Option<UserInputMessage>,
     history: VecDeque<ChatMessage>,
+    /// Similar to history in that it stores user and assistant responses, except that it is not used
+    /// in message requests. Instead, the responses are expected to be in human-readable format,
+    /// e.g. user messages prefixed with '> '. Should also be used to store errors posted in the
+    /// chat.
+    pub transcript: VecDeque<String>,
     tools: Vec<Tool>,
     /// Context manager for handling sticky context files
     pub context_manager: Option<ContextManager>,
@@ -92,6 +97,7 @@ impl ConversationState {
             conversation_id,
             next_message: None,
             history: VecDeque::new(),
+            transcript: VecDeque::with_capacity(MAX_CONVERSATION_STATE_HISTORY_LEN),
             tools: tool_config
                 .into_values()
                 .map(|v| {
@@ -151,6 +157,9 @@ impl ConversationState {
             None
         };
 
+        // Record message before adding context.
+        self.append_user_transcript(&input);
+
         // Combine context files with user input if available
         let content = if let Some(context) = context_files {
             format!("{}\n{}", context, input)
@@ -183,6 +192,8 @@ impl ConversationState {
         if let Some(next_message) = self.next_message.as_ref() {
             warn!(?next_message, "next_message should not exist");
         }
+
+        self.append_assistant_transcript(&message);
         self.history.push_back(ChatMessage::AssistantResponseMessage(message));
     }
 
@@ -460,6 +471,24 @@ impl ConversationState {
     pub fn context_message_length(&self) -> Option<usize> {
         self.context_message_length
     }
+
+    pub fn append_user_transcript(&mut self, message: &str) {
+        self.append_transcript(format!("> {}", message.replace("\n", "> \n")));
+    }
+
+    pub fn append_assistant_transcript(&mut self, message: &AssistantResponseMessage) {
+        let tool_uses = message.tool_uses.as_deref().map_or("none".to_string(), |tools| {
+            tools.iter().map(|tool| tool.name.clone()).collect::<Vec<_>>().join(",")
+        });
+        self.append_transcript(format!("{}\n[Tool uses: {tool_uses}]", message.content.clone()));
+    }
+
+    pub fn append_transcript(&mut self, message: String) {
+        if self.transcript.len() >= MAX_CONVERSATION_STATE_HISTORY_LEN {
+            self.transcript.pop_front();
+        }
+        self.transcript.push_back(message);
+    }
 }
 
 impl From<InvokeOutput> for ToolResultContentBlock {
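The new transcript field behaves as a bounded ring buffer: once MAX_CONVERSATION_STATE_HISTORY_LEN entries accumulate, the oldest entry is evicted before the newest is pushed. A standalone sketch of that eviction behavior; the constant's real value is defined elsewhere in the crate, so the 3 below and the sample messages are purely illustrative:

use std::collections::VecDeque;

// Illustrative stand-in; the constant's actual value is not shown in this diff.
const MAX_CONVERSATION_STATE_HISTORY_LEN: usize = 3;

fn append_transcript(transcript: &mut VecDeque<String>, message: String) {
    // Evict the oldest entry once the buffer is full, then append the new one.
    if transcript.len() >= MAX_CONVERSATION_STATE_HISTORY_LEN {
        transcript.pop_front();
    }
    transcript.push_back(message);
}

fn main() {
    let mut transcript = VecDeque::with_capacity(MAX_CONVERSATION_STATE_HISTORY_LEN);
    for msg in [
        "> hello",
        "Hi! How can I help?\n[Tool uses: none]",
        "> list my files",
        "Here are your files.\n[Tool uses: fs_read]",
    ] {
        append_transcript(&mut transcript, msg.to_string());
    }
    // Only the newest three entries survive; "> hello" was evicted.
    assert_eq!(transcript.len(), MAX_CONVERSATION_STATE_HISTORY_LEN);
    assert_eq!(
        transcript.front().map(String::as_str),
        Some("Hi! How can I help?\n[Tool uses: none]")
    );
}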

crates/q_cli/src/cli/chat/mod.rs

Lines changed: 100 additions & 75 deletions
@@ -50,11 +50,11 @@ use fig_settings::Settings;
 use fig_util::CLI_BINARY_NAME;
 use input_source::InputSource;
 use parser::{
-    RecvError,
     RecvErrorKind,
     ResponseParser,
     ToolUse,
 };
+use regex::Regex;
 use serde_json::Map;
 use spinners::{
     Spinner,
@@ -65,6 +65,7 @@ use tokio::signal::unix::{
     SignalKind,
     signal,
 };
+use tools::gh_issue::GhIssueContext;
 use tools::{
     Tool,
     ToolSpec,
@@ -260,6 +261,8 @@ pub struct ChatContext<W: Write> {
     /// State used to keep track of tool use relation
     tool_use_status: ToolUseStatus,
     accept_all: bool,
+    /// Any failed requests that could be useful for error report/debugging
+    failed_request_ids: Vec<String>,
 }
 
 impl<W: Write> ChatContext<W> {
@@ -291,6 +294,7 @@ impl<W: Write> ChatContext<W> {
             tool_use_telemetry_events: HashMap::new(),
             tool_use_status: ToolUseStatus::Idle,
             accept_all,
+            failed_request_ids: Vec::new(),
         })
     }
 }
@@ -388,6 +392,9 @@ where
             });
         }
 
+        // Remove non-ASCII and ANSI characters.
+        let re = Regex::new(r"((\x9B|\x1B\[)[0-?]*[ -\/]*[@-~])|([^\x00-\x7F]+)").unwrap();
+
         loop {
             debug_assert!(next_state.is_some());
             let chat_state = next_state.take().unwrap_or_default();
@@ -428,28 +435,37 @@ where
                 match result {
                     Ok(state) => next_state = Some(state),
                     Err(e) => {
-                        fn print_error<W: Write>(
-                            output: &mut W,
-                            prepend_msg: &str,
-                            report: Option<eyre::Report>,
-                        ) -> Result<(), std::io::Error> {
+                        let mut print_error = |output: &mut W,
+                                               prepend_msg: &str,
+                                               report: Option<eyre::Report>|
+                         -> Result<(), std::io::Error> {
                             queue!(
                                 output,
                                 style::SetAttribute(Attribute::Bold),
                                 style::SetForegroundColor(Color::Red),
                             )?;
 
                             match report {
-                                Some(report) => queue!(output, style::Print(format!("{}: {:?}\n", prepend_msg, report)),)?,
-                                None => queue!(output, style::Print(prepend_msg), style::Print("\n"))?,
+                                Some(report) => {
+                                    let text = re
+                                        .replace_all(&format!("{}: {:?}\n", prepend_msg, report), "")
+                                        .into_owned();
+
+                                    queue!(output, style::Print(&text),)?;
+                                    self.conversation_state.append_transcript(text);
+                                },
+                                None => {
+                                    queue!(output, style::Print(prepend_msg), style::Print("\n"))?;
+                                    self.conversation_state.append_transcript(prepend_msg.to_string());
+                                },
                             }
 
                             execute!(
                                 output,
                                 style::SetAttribute(Attribute::Reset),
                                 style::SetForegroundColor(Color::Reset),
                             )
-                        }
+                        };
 
                         error!(?e, "An error occurred processing the current state");
                         if self.interactive && self.spinner.is_some() {
@@ -987,7 +1003,13 @@ where
                     style::Print(format!("{}\n", "▔".repeat(terminal_width))),
                     style::SetForegroundColor(Color::Reset),
                 )?;
-                let invoke_result = tool.1.invoke(&self.ctx, &mut self.output).await;
+                let invoke_result = tool
+                    .1
+                    .invoke(&self.ctx, &mut self.output, GhIssueContext {
+                        conversation_state: &self.conversation_state,
+                        failed_request_ids: &self.failed_request_ids,
+                    })
+                    .await;
 
                 if self.interactive && self.spinner.is_some() {
                     queue!(
@@ -1105,78 +1127,81 @@ where
                     },
                 }
             },
-            Err(RecvError {
-                request_id,
-                source: RecvErrorKind::StreamTimeout { source, duration },
-            }) => {
-                error!(
-                    request_id,
-                    ?source,
-                    "Encountered a stream timeout after waiting for {}s",
-                    duration.as_secs()
-                );
-                if self.interactive {
-                    execute!(self.output, cursor::Hide)?;
-                    self.spinner = Some(Spinner::new(Spinners::Dots, "Dividing up the work...".to_string()));
-                }
-                // For stream timeouts, we'll tell the model to try and split its response into
-                // smaller chunks.
-                self.conversation_state
-                    .push_assistant_message(AssistantResponseMessage {
-                        message_id: None,
-                        content: "Response timed out - message took too long to generate".to_string(),
-                        tool_uses: None,
-                    });
-                self.conversation_state
-                    .append_new_user_message(
-                        "You took too long to respond - try to split up the work into smaller steps.".to_string(),
-                    )
-                    .await;
-                self.send_tool_use_telemetry().await;
-                return Ok(ChatState::HandleResponseStream(
-                    self.client
-                        .send_message(self.conversation_state.as_sendable_conversation_state().await)
-                        .await?,
-                ));
-            },
-            Err(RecvError {
-                request_id,
-                source:
+            Err(recv_error) => {
+                if let Some(request_id) = &recv_error.request_id {
+                    self.failed_request_ids.push(request_id.clone());
+                };
+
+                match recv_error.source {
+                    RecvErrorKind::StreamTimeout { source, duration } => {
+                        error!(
+                            recv_error.request_id,
+                            ?source,
+                            "Encountered a stream timeout after waiting for {}s",
+                            duration.as_secs()
+                        );
+                        if self.interactive {
+                            execute!(self.output, cursor::Hide)?;
+                            self.spinner =
+                                Some(Spinner::new(Spinners::Dots, "Dividing up the work...".to_string()));
+                        }
+                        // For stream timeouts, we'll tell the model to try and split its response into
+                        // smaller chunks.
+                        self.conversation_state
+                            .push_assistant_message(AssistantResponseMessage {
+                                message_id: None,
+                                content: "Response timed out - message took too long to generate".to_string(),
+                                tool_uses: None,
+                            });
+                        self.conversation_state
+                            .append_new_user_message(
+                                "You took too long to respond - try to split up the work into smaller steps."
+                                    .to_string(),
+                            )
+                            .await;
+                        self.send_tool_use_telemetry().await;
+                        return Ok(ChatState::HandleResponseStream(
+                            self.client
+                                .send_message(self.conversation_state.as_sendable_conversation_state().await)
+                                .await?,
+                        ));
+                    },
                     RecvErrorKind::UnexpectedToolUseEos {
                         tool_use_id,
                         name,
                         message,
+                    } => {
+                        error!(
+                            recv_error.request_id,
+                            tool_use_id, name, "The response stream ended before the entire tool use was received"
+                        );
+                        if self.interactive {
+                            execute!(self.output, cursor::Hide)?;
+                            self.spinner = Some(Spinner::new(
+                                Spinners::Dots,
+                                "The generated tool use was too large, trying to divide up the work...".to_string(),
+                            ));
+                        }
+
+                        self.conversation_state.push_assistant_message(*message);
+                        let tool_results = vec![ToolResult {
+                            tool_use_id,
+                            content: vec![ToolResultContentBlock::Text(
+                                "The generated tool was too large, try again but this time split up the work between multiple tool uses".to_string(),
+                            )],
+                            status: ToolResultStatus::Error,
+                        }];
+                        self.conversation_state.add_tool_results(tool_results);
+                        self.send_tool_use_telemetry().await;
+                        return Ok(ChatState::HandleResponseStream(
+                            self.client
+                                .send_message(self.conversation_state.as_sendable_conversation_state().await)
+                                .await?,
+                        ));
                     },
-            }) => {
-                error!(
-                    request_id,
-                    tool_use_id, name, "The response stream ended before the entire tool use was received"
-                );
-                if self.interactive {
-                    execute!(self.output, cursor::Hide)?;
-                    self.spinner = Some(Spinner::new(
-                        Spinners::Dots,
-                        "The generated tool use was too large, trying to divide up the work...".to_string(),
-                    ));
+                    _ => return Err(recv_error.into()),
                 }
-
-                self.conversation_state.push_assistant_message(*message);
-                let tool_results = vec![ToolResult {
-                    tool_use_id,
-                    content: vec![ToolResultContentBlock::Text(
-                        "The generated tool was too large, try again but this time split up the work between multiple tool uses".to_string(),
-                    )],
-                    status: ToolResultStatus::Error,
-                }];
-                self.conversation_state.add_tool_results(tool_results);
-                self.send_tool_use_telemetry().await;
-                return Ok(ChatState::HandleResponseStream(
-                    self.client
-                        .send_message(self.conversation_state.as_sendable_conversation_state().await)
-                        .await?,
-                ));
             },
-            Err(err) => return Err(err.into()),
         }
 
         // Fix for the markdown parser copied over from q chat:
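The error path now scrubs ANSI escape sequences and non-ASCII characters before the text is printed and appended to the transcript, keeping the report readable when it is later pasted into an issue body. A quick demonstration of the same pattern with the regex crate; the sample string is invented:

use regex::Regex;

fn main() {
    // Same pattern as the diff: the first alternative matches ANSI CSI escape
    // sequences, the second matches runs of non-ASCII characters.
    let re = Regex::new(r"((\x9B|\x1B\[)[0-?]*[ -\/]*[@-~])|([^\x00-\x7F]+)").unwrap();

    let raw = "\x1b[1m\x1b[31mAmazon Q is having trouble responding right now ▔▔▔\x1b[0m";
    let clean = re.replace_all(raw, "").into_owned();

    assert_eq!(clean, "Amazon Q is having trouble responding right now ");
}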
