Skip to content

Commit f44e62b

Browse files
authored
Reply gracefully when chat model is not selected (#1183)
* Add `*.chat` files to gitignore. * Gracefully handle new messages without a selected chat model. * Run pre-commit.
1 parent 44cf827 commit f44e62b

File tree

2 files changed

+18
-7
lines changed

2 files changed

+18
-7
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,3 +138,4 @@ dev.sh
138138
# Version files are auto-generated by Hatchling and should not be committed to
139139
# the source repo.
140140
packages/**/_version.py
141+
*.chat

packages/jupyter-ai/jupyter_ai/chat_handlers/base.py

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -171,27 +171,37 @@ async def on_message(self, message: Message):
171171
"""
172172
Method which receives a human message, calls `self.get_llm_chain()`, and
173173
processes the message via `self.process_message()`, calling
174-
`self.handle_exc()` when an exception is raised. This method is called
175-
by RootChatHandler when it routes a human message to this chat handler.
174+
`self.handle_exc()` when an exception is raised.
175+
176+
This is the method called directly in response to new chat messages.
176177
"""
177-
lm_provider_klass = self.config_manager.lm_provider
178+
ChatModelProvider = self.config_manager.lm_provider
179+
180+
# first, ensure a chat model is configured
181+
if not ChatModelProvider:
182+
# TODO: update this message to be more useful once we improve
183+
# ease-of-access to the Jupyter AI settings.
184+
self.reply(
185+
"To use Jupyter AI, please select a chat model first in the Jupyter AI settings."
186+
)
187+
return
178188

179189
# ensure the current slash command is supported
180190
if self.routing_type.routing_method == "slash_command":
181191
routing_type = cast(SlashCommandRoutingType, self.routing_type)
182192
slash_command = "/" + routing_type.slash_id if routing_type.slash_id else ""
183-
if slash_command in lm_provider_klass.unsupported_slash_commands:
193+
if slash_command in ChatModelProvider.unsupported_slash_commands:
184194
self.reply(
185195
"Sorry, the selected language model does not support this slash command.",
186196
)
187197
return
188198

189199
# check whether the configured LLM can support a request at this time.
190200
if self.uses_llm and BaseChatHandler._requests_count > 0:
191-
lm_provider_params = self.config_manager.lm_provider_params
192-
lm_provider = lm_provider_klass(**lm_provider_params)
201+
chat_model_args = self.config_manager.lm_provider_params
202+
chat_model = ChatModelProvider(**chat_model_args)
193203

194-
if not lm_provider.allows_concurrency:
204+
if not chat_model.allows_concurrency:
195205
self.reply(
196206
"The currently selected language model can process only one request at a time. Please wait for me to reply before sending another question.",
197207
message,

0 commit comments

Comments (0)