Commit e58a48c
Add conversation history support to chat mode
The interactive mode now maintains conversation context across chat exchanges: the readline-based loop tracks conversation history and passes it to the model on each request.

A new ChatWithMessagesContext method supports conversation history while preserving backward compatibility through the existing ChatWithContext method. The history records the processed user messages, after file inclusions and image processing, so it reflects exactly what the model received.

The code also now handles context cancellation properly by returning the assistant response when cancelled, and the basic interactive-mode fallback was removed since readline initialization is now required.

Signed-off-by: Eric Curtin <[email protected]>
1 parent fb80c6d commit e58a48c
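In practice, the new flow accumulates history turn by turn. Below is a minimal sketch of that loop, assuming the desktop.OpenAIChatMessage type and the ChatWithMessagesContext signature that appear in the diff; runChatLoop, chatTurns, and onToken are hypothetical scaffolding, and the real code appends the processed user message built by buildUserMessage rather than the raw input:

// Sketch only; assumes imports "context", "errors", "fmt" and the repo's desktop package.
func runChatLoop(ctx context.Context, client *desktop.Client, model string, chatTurns []string) error {
	var conversationHistory []desktop.OpenAIChatMessage
	onToken := func(content string) { fmt.Print(content) } // streaming token callback

	for _, userInput := range chatTurns {
		// Each request carries the full accumulated history; one-shot callers
		// (like chatWithMarkdown in this commit) pass nil instead.
		reply, err := client.ChatWithMessagesContext(
			ctx, model, conversationHistory, userInput, nil, onToken, false)
		if err != nil && !errors.Is(err, context.Canceled) {
			return err
		}
		// On Ctrl+C the partial assistant response is still returned above,
		// so it can be recorded before the loop continues.

		// Record both turns so the next request sees them.
		conversationHistory = append(conversationHistory,
			desktop.OpenAIChatMessage{Role: "user", Content: userInput},
			desktop.OpenAIChatMessage{Role: "assistant", Content: reply},
		)
	}
	return nil
}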

File tree

4 files changed: +116 -125 lines

cmd/cli/commands/run.go

Lines changed: 82 additions & 93 deletions

@@ -140,8 +140,7 @@ func generateInteractiveWithReadline(cmd *cobra.Command, desktopClient *desktop.
 		AltPlaceholder: `Use """ to end multi-line input`,
 	})
 	if err != nil {
-		// Fall back to basic input mode if readline initialization fails
-		return generateInteractiveBasic(cmd, desktopClient, model)
+		return err
 	}
 
 	// Disable history if the environment variable is set
@@ -154,6 +153,7 @@ func generateInteractiveWithReadline(cmd *cobra.Command, desktopClient *desktop.
 
 	var sb strings.Builder
 	var multiline bool
+	var conversationHistory []desktop.OpenAIChatMessage
 
 	// Add a helper function to handle file inclusion when @ is pressed
 	// We'll implement a basic version here that shows a message when @ is pressed
@@ -245,7 +245,7 @@ func generateInteractiveWithReadline(cmd *cobra.Command, desktopClient *desktop.
 			}
 		}()
 
-		err := chatWithMarkdownContext(chatCtx, cmd, desktopClient, model, userInput)
+		assistantResponse, processedUserMessage, err := chatWithMarkdownContext(chatCtx, cmd, desktopClient, model, userInput, conversationHistory)
 
 		// Clean up signal handler
 		signal.Stop(sigChan)
@@ -263,70 +263,21 @@ func generateInteractiveWithReadline(cmd *cobra.Command, desktopClient *desktop.
 			continue
 		}
 
+		// Add the processed user message and assistant response to conversation history.
+		// Using the processed message ensures the history reflects exactly what the model
+		// received (after file inclusions and image processing), not the raw user input.
+		conversationHistory = append(conversationHistory, processedUserMessage)
+		conversationHistory = append(conversationHistory, desktop.OpenAIChatMessage{
+			Role:    "assistant",
+			Content: assistantResponse,
+		})
+
 		cmd.Println()
 		sb.Reset()
 		}
 	}
 }
 
-// generateInteractiveBasic provides a basic interactive mode (fallback)
-func generateInteractiveBasic(cmd *cobra.Command, desktopClient *desktop.Client, model string) error {
-	scanner := bufio.NewScanner(os.Stdin)
-	for {
-		userInput, err := readMultilineInput(cmd, scanner)
-		if err != nil {
-			if err.Error() == "EOF" {
-				break
-			}
-			return fmt.Errorf("Error reading input: %w", err)
-		}
-
-		if strings.ToLower(strings.TrimSpace(userInput)) == "/bye" {
-			break
-		}
-
-		if strings.TrimSpace(userInput) == "" {
-			continue
-		}
-
-		// Create a cancellable context for the chat request
-		// This allows us to cancel the request if the user presses Ctrl+C during response generation
-		chatCtx, cancelChat := context.WithCancel(cmd.Context())
-
-		// Set up signal handler to cancel the context on Ctrl+C
-		sigChan := make(chan os.Signal, 1)
-		signal.Notify(sigChan, syscall.SIGINT)
-		go func() {
-			select {
-			case <-sigChan:
-				cancelChat()
-			case <-chatCtx.Done():
-				// Context cancelled, exit goroutine
-				// Context cancelled, exit goroutine
-			}
-		}()
-
-		err = chatWithMarkdownContext(chatCtx, cmd, desktopClient, model, userInput)
-
-		cancelChat()
-		signal.Stop(sigChan)
-		cancelChat()
-
-		if err != nil {
-			// Check if the error is due to context cancellation (Ctrl+C during response)
-			if errors.Is(err, context.Canceled) {
-				fmt.Println("\nUse Ctrl + d or /bye to exit.")
-			} else {
-				cmd.PrintErrln(handleClientError(err, "Failed to generate a response"))
-			}
-			continue
-		}
-
-		cmd.Println()
-	}
-	return nil
-}
-
 var (
 	markdownRenderer *glamour.TermRenderer
 	lastWidth        int
@@ -507,47 +458,93 @@ func renderMarkdown(content string) (string, error) {
 	return rendered, nil
 }
 
+// buildUserMessage constructs an OpenAIChatMessage for the user with the processed prompt and images.
+// This is used to ensure conversation history reflects exactly what the model received.
+func buildUserMessage(prompt string, imageURLs []string) desktop.OpenAIChatMessage {
+	if len(imageURLs) > 0 {
+		// Multimodal message with images - build content array
+		contentParts := make([]desktop.ContentPart, 0, len(imageURLs)+1)
+
+		// Add all images first
+		for _, imageURL := range imageURLs {
+			contentParts = append(contentParts, desktop.ContentPart{
+				Type: "image_url",
+				ImageURL: &desktop.ImageURL{
+					URL: imageURL,
+				},
+			})
+		}
+
+		// Add text prompt if present
+		if prompt != "" {
+			contentParts = append(contentParts, desktop.ContentPart{
+				Type: "text",
+				Text: prompt,
+			})
+		}
+
+		return desktop.OpenAIChatMessage{
+			Role:    "user",
+			Content: contentParts,
+		}
+	}
+
+	// Simple text-only message
+	return desktop.OpenAIChatMessage{
+		Role:    "user",
+		Content: prompt,
	}
+}
+
 // chatWithMarkdown performs chat and streams the response with selective markdown rendering.
 func chatWithMarkdown(cmd *cobra.Command, client *desktop.Client, model, prompt string) error {
-	return chatWithMarkdownContext(cmd.Context(), cmd, client, model, prompt)
+	_, _, err := chatWithMarkdownContext(cmd.Context(), cmd, client, model, prompt, nil)
+	return err
 }
 
 // chatWithMarkdownContext performs chat with context support and streams the response with selective markdown rendering.
-func chatWithMarkdownContext(ctx context.Context, cmd *cobra.Command, client *desktop.Client, model, prompt string) error {
+// It accepts an optional conversation history and returns both the assistant's response and the processed user message
+// (after file inclusions and image processing) for accurate history tracking.
+func chatWithMarkdownContext(ctx context.Context, cmd *cobra.Command, client *desktop.Client, model, prompt string, conversationHistory []desktop.OpenAIChatMessage) (assistantResponse string, processedUserMessage desktop.OpenAIChatMessage, err error) {
 	colorMode, _ := cmd.Flags().GetString("color")
 	useMarkdown := shouldUseMarkdown(colorMode)
 	debug, _ := cmd.Flags().GetBool("debug")
 
 	// Process file inclusions first (files referenced with @ symbol)
-	prompt, err := processFileInclusions(prompt)
+	prompt, err = processFileInclusions(prompt)
 	if err != nil {
-		return fmt.Errorf("failed to process file inclusions: %w", err)
+		return "", desktop.OpenAIChatMessage{}, fmt.Errorf("failed to process file inclusions: %w", err)
 	}
 
 	var imageURLs []string
 	cleanedPrompt, imgs, err := processImagesInPrompt(prompt)
 	if err != nil {
-		return fmt.Errorf("failed to process images: %w", err)
+		return "", desktop.OpenAIChatMessage{}, fmt.Errorf("failed to process images: %w", err)
 	}
 	prompt = cleanedPrompt
 	imageURLs = imgs
 
+	// Build the processed user message to return for history tracking.
+	// This reflects exactly what the model receives.
+	processedUserMessage = buildUserMessage(prompt, imageURLs)
+
 	if !useMarkdown {
 		// Simple case: just stream as plain text
-		return client.ChatWithContext(ctx, model, prompt, imageURLs, func(content string) {
+		assistantResponse, err = client.ChatWithMessagesContext(ctx, model, conversationHistory, prompt, imageURLs, func(content string) {
 			cmd.Print(content)
 		}, false)
+		return assistantResponse, processedUserMessage, err
 	}
 
 	// For markdown: use streaming buffer to render code blocks as they complete
 	markdownBuffer := NewStreamingMarkdownBuffer()
 
-	err = client.ChatWithContext(ctx, model, prompt, imageURLs, func(content string) {
+	assistantResponse, err = client.ChatWithMessagesContext(ctx, model, conversationHistory, prompt, imageURLs, func(content string) {
 		// Use the streaming markdown buffer to intelligently render content
-		rendered, err := markdownBuffer.AddContent(content, true)
-		if err != nil {
+		rendered, renderErr := markdownBuffer.AddContent(content, true)
+		if renderErr != nil {
 			if debug {
-				cmd.PrintErrln(err)
+				cmd.PrintErrln(renderErr)
 			}
 			// Fallback to plain text on error
 			cmd.Print(content)
@@ -556,15 +553,15 @@ func chatWithMarkdownContext(ctx context.Context, cmd *cobra.Command, client *de
 		}
 	}, true)
 	if err != nil {
-		return err
+		return assistantResponse, processedUserMessage, err
 	}
 
 	// Flush any remaining content from the markdown buffer
 	if remaining, flushErr := markdownBuffer.Flush(true); flushErr == nil && remaining != "" {
 		cmd.Print(remaining)
 	}
 
-	return nil
+	return assistantResponse, processedUserMessage, nil
 }
 
 func newRunCmd() *cobra.Command {
@@ -641,14 +638,10 @@ func newRunCmd() *cobra.Command {
 			return nil
 		}
 
-		// Interactive mode for external OpenAI endpoint
-		if term.IsTerminal(int(os.Stdin.Fd())) {
-			termenv.SetDefaultOutput(
-				termenv.NewOutput(asPrinter(cmd), termenv.WithColorCache(true)),
-			)
-			return generateInteractiveWithReadline(cmd, openaiClient, model)
-		}
-		return generateInteractiveBasic(cmd, openaiClient, model)
+		termenv.SetDefaultOutput(
+			termenv.NewOutput(asPrinter(cmd), termenv.WithColorCache(true)),
+		)
+		return generateInteractiveWithReadline(cmd, openaiClient, model)
 	}
 
 	if _, err := ensureStandaloneRunnerAvailable(cmd.Context(), asPrinter(cmd), debug); err != nil {
@@ -746,19 +739,15 @@ func newRunCmd() *cobra.Command {
 			return nil
 		}
 
-		// Use enhanced readline-based interactive mode when terminal is available
-		if term.IsTerminal(int(os.Stdin.Fd())) {
-			// Initialize termenv with color caching before starting interactive session.
-			// This queries the terminal background color once and caches it, preventing
-			// OSC response sequences from appearing in stdin during the interactive loop.
-			termenv.SetDefaultOutput(
-				termenv.NewOutput(asPrinter(cmd), termenv.WithColorCache(true)),
-			)
-			return generateInteractiveWithReadline(cmd, desktopClient, model)
-		}
+		// Initialize termenv with color caching before starting interactive session.
+		// This queries the terminal background color once and caches it, preventing
+		// OSC response sequences from appearing in stdin during the interactive loop.
+		termenv.SetDefaultOutput(
+			termenv.NewOutput(asPrinter(cmd), termenv.WithColorCache(true)),
+		)
+
+		return generateInteractiveWithReadline(cmd, desktopClient, model)
 
-		// Fall back to basic mode if not a terminal
-		return generateInteractiveBasic(cmd, desktopClient, model)
 	},
 	ValidArgsFunction: completion.ModelNames(getDesktopClient, 1),
 }
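For reference, here is the shape of the message the new buildUserMessage helper produces when a prompt carries one attached image. A sketch: the field layout follows the desktop.ContentPart and desktop.ImageURL types in the hunk above, and the data URL is illustrative.

msg := buildUserMessage("describe this picture", []string{"data:image/png;base64,..."})
// msg is equivalent to:
//   desktop.OpenAIChatMessage{
//       Role: "user",
//       Content: []desktop.ContentPart{
//           {Type: "image_url", ImageURL: &desktop.ImageURL{URL: "data:image/png;base64,..."}},
//           {Type: "text", Text: "describe this picture"},
//       },
//   }
// With no images, Content is simply the prompt string (the text-only branch).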
