Skip to content

Commit 289d39d

Browse files
authored
Add /fix slash command (#828)
* remove unused clear_memory() method in default.py
* implement error output signaling and retrieval in ActiveCellContext
* fixup
* implement /fix slash command
* pre-commit
* add docstring
* edit reply message when no active cell with error output exists
* add user documentation and screenshots for /fix
1 parent e214ee7 commit 289d39d

File tree

18 files changed

+481
-80
lines changed

18 files changed

+481
-80
lines changed
82.4 KB
Loading
21.1 KB
Loading
164 KB
Loading

docs/source/users/index.md

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -497,6 +497,36 @@ The `/learn` command also provides downloading and processing papers from the [a
497497
Use the `/export` command to export the chat history from the current session to a markdown file named `chat_history-YYYY-MM-DD-HH-mm.md`. Using `/export <file_name>` will export the chat history to `<file_name>-YYYY-MM-DD-HH-mm.md` instead. You can export chat history as many times as you like in a single session. Each successive export will include the entire chat history up to that point in the session.
498498

499499

500+
### Fixing a code cell with an error
501+
502+
The `/fix` command can be used to fix any code cell with an error output in a
503+
Jupyter notebook file. To start, type `/fix` into the chat input. Jupyter AI
504+
will then prompt you to select a cell with error output before sending the
505+
request.
506+
507+
<img src="../_static/fix-no-error-cell-selected.png"
508+
alt='Screenshot of the chat input containing `/fix` without a code cell with error output selected.'
509+
class="screenshot" />
510+
511+
Then click on a code cell with error output. A blue bar should appear
512+
immediately to the left of the code cell.
513+
514+
<img src="../_static/fix-error-cell-selected.png"
515+
alt='Screenshot of a code cell with error output selected.'
516+
class="screenshot" />
517+
518+
After this, the Send button to the right of the chat input will be enabled, and
519+
you can use your mouse or keyboard to send `/fix` to Jupyternaut. The code cell
520+
and its associated error output are included in the message automatically. When
521+
complete, Jupyternaut will reply with suggested code that should fix the error.
522+
You can use the action toolbar under each code block to quickly replace the
523+
contents of the failing cell.
524+
525+
<img src="../_static/fix-response.png"
526+
alt='Screenshot of a response from `/fix`, with the "Replace active cell" action hovered.'
527+
class="screenshot" style="max-width:65%" />
528+
529+
500530
### Additional chat commands
501531

502532
To clear the chat panel, use the `/clear` command. This does not reset the AI model; the model may still remember previous messages that you sent it, and it may use them to inform its responses.

packages/jupyter-ai/jupyter_ai/chat_handlers/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from .clear import ClearChatHandler
44
from .default import DefaultChatHandler
55
from .export import ExportChatHandler
6+
from .fix import FixChatHandler
67
from .generate import GenerateChatHandler
78
from .help import HelpChatHandler
89
from .learn import LearnChatHandler

packages/jupyter-ai/jupyter_ai/chat_handlers/default.py

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -43,19 +43,6 @@ def create_llm_chain(
4343
llm=llm, prompt=prompt_template, verbose=True, memory=self.memory
4444
)
4545

46-
def clear_memory(self):
47-
# clear chain memory
48-
if self.memory:
49-
self.memory.clear()
50-
51-
# clear transcript for existing chat clients
52-
reply_message = ClearMessage()
53-
self.reply(reply_message)
54-
55-
# clear transcript for new chat clients
56-
if self._chat_history:
57-
self._chat_history.clear()
58-
5946
async def process_message(self, message: HumanChatMessage):
6047
self.get_llm_chain()
6148
response = await self.llm_chain.apredict(input=message.body, stop=["\nHuman:"])
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
from typing import Dict, Type
2+
3+
from jupyter_ai.models import CellWithErrorSelection, HumanChatMessage
4+
from jupyter_ai_magics.providers import BaseProvider
5+
from langchain.chains import LLMChain
6+
from langchain.prompts import PromptTemplate
7+
8+
from .base import BaseChatHandler, SlashCommandRoutingType
9+
10+
# Prompt sent to the language model by the `/fix` chat handler. The user's
# extra instructions, the failing cell's source, and the cell's error output
# (traceback plus "ErrorName: error value") are interpolated into the fenced
# blocks below.
FIX_STRING_TEMPLATE = """
You are Jupyternaut, a conversational assistant living in JupyterLab. Please fix
the notebook cell described below.

Additional instructions:

{extra_instructions}

Input cell:

```
{cell_content}
```

Output error:

```
{traceback}

{error_name}: {error_value}
```
""".strip()

# LangChain prompt template wrapping FIX_STRING_TEMPLATE. The input variables
# listed here are supplied as keyword arguments when the chain is invoked.
FIX_PROMPT_TEMPLATE = PromptTemplate(
    input_variables=[
        "extra_instructions",
        "cell_content",
        "traceback",
        "error_name",
        "error_value",
    ],
    template=FIX_STRING_TEMPLATE,
)
44+
45+
class FixChatHandler(BaseChatHandler):
    """
    Accepts a `HumanChatMessage` that includes a cell with error output and
    recommends a fix as a reply. If a cell with error output is not included,
    this chat handler replies with a usage hint and does nothing else.

    `/fix` also accepts additional instructions in natural language as an
    arbitrary number of arguments, e.g.

    ```
    /fix use the numpy library to implement this function instead.
    ```
    """

    id = "fix"
    name = "Fix error cell"
    help = "Fix an error cell selected in your notebook"
    routing_type = SlashCommandRoutingType(slash_id="fix")
    uses_llm = True

    # NOTE: the previous no-op `__init__` override (which only forwarded
    # *args/**kwargs to `super().__init__`) was removed; the inherited
    # constructor is used directly.

    def create_llm_chain(
        self, provider: Type[BaseProvider], provider_params: Dict[str, str]
    ):
        """
        Instantiate the provider and build the LLMChain used to generate fix
        suggestions. User-configured model parameters override the defaults
        in `provider_params`.
        """
        unified_parameters = {
            **provider_params,
            **(self.get_model_parameters(provider, provider_params)),
        }
        llm = provider(**unified_parameters)

        self.llm = llm
        self.llm_chain = LLMChain(llm=llm, prompt=FIX_PROMPT_TEMPLATE, verbose=True)

    async def process_message(self, message: HumanChatMessage):
        """
        Reply with suggested code that fixes the selected error cell.

        If `message` carries no cell-with-error selection, reply with a usage
        hint instead of invoking the model.
        """
        if not (message.selection and message.selection.type == "cell-with-error"):
            self.reply(
                "`/fix` requires an active code cell with error output. Please click on a cell with error output and retry.",
                message,
            )
            return

        # hint type of selection
        selection: CellWithErrorSelection = message.selection

        # parse additional instructions specified after `/fix`; the message
        # body is expected to start with the 4-character command "/fix"
        extra_instructions = message.body[4:].strip() or "None."

        self.get_llm_chain()
        response = await self.llm_chain.apredict(
            extra_instructions=extra_instructions,
            stop=["\nHuman:"],
            cell_content=selection.source,
            error_name=selection.error.name,
            error_value=selection.error.value,
            traceback="\n".join(selection.error.traceback),
        )
        self.reply(response, message)

packages/jupyter-ai/jupyter_ai/extension.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
ClearChatHandler,
1818
DefaultChatHandler,
1919
ExportChatHandler,
20+
FixChatHandler,
2021
GenerateChatHandler,
2122
HelpChatHandler,
2223
LearnChatHandler,
@@ -264,13 +265,17 @@ def initialize_settings(self):
264265
ask_chat_handler = AskChatHandler(**chat_handler_kwargs, retriever=retriever)
265266

266267
export_chat_handler = ExportChatHandler(**chat_handler_kwargs)
268+
269+
fix_chat_handler = FixChatHandler(**chat_handler_kwargs)
270+
267271
jai_chat_handlers = {
268272
"default": default_chat_handler,
269273
"/ask": ask_chat_handler,
270274
"/clear": clear_chat_handler,
271275
"/generate": generate_chat_handler,
272276
"/learn": learn_chat_handler,
273277
"/export": export_chat_handler,
278+
"/fix": fix_chat_handler,
274279
}
275280

276281
help_chat_handler = HelpChatHandler(

packages/jupyter-ai/jupyter_ai/handlers.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,7 @@ async def on_message(self, message):
207207
id=chat_message_id,
208208
time=time.time(),
209209
body=chat_request.prompt,
210+
selection=chat_request.selection,
210211
client=self.chat_client,
211212
)
212213

packages/jupyter-ai/jupyter_ai/models.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,29 @@
88
DEFAULT_CHUNK_OVERLAP = 100
99

1010

11+
class CellError(BaseModel):
12+
name: str
13+
value: str
14+
traceback: List[str]
15+
16+
17+
class CellWithErrorSelection(BaseModel):
18+
type: Literal["cell-with-error"] = "cell-with-error"
19+
source: str
20+
error: CellError
21+
22+
23+
Selection = Union[CellWithErrorSelection]
24+
25+
1126
# the type of message used to chat with the agent
1227
class ChatRequest(BaseModel):
1328
prompt: str
29+
# TODO: This currently is only used when a user runs the /fix slash command.
30+
# In the future, the frontend should set the text selection on this field in
31+
# the `HumanChatMessage` it sends to JAI, instead of appending the text
32+
# selection to `body` in the frontend.
33+
selection: Optional[Selection]
1434

1535

1636
class ChatUser(BaseModel):
@@ -55,6 +75,7 @@ class HumanChatMessage(BaseModel):
5575
time: float
5676
body: str
5777
client: ChatClient
78+
selection: Optional[Selection]
5879

5980

6081
class ConnectionMessage(BaseModel):

0 commit comments

Comments
 (0)