jupyter_ai_personas/emoji_persona/persona.py (27 additions, 81 deletions)
@@ -1,57 +1,9 @@
-from typing import Any
-
-import emoji
+from jupyter_ai.personas.base_persona import BasePersona, PersonaDefaults
 from jupyterlab_chat.models import Message
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables.history import RunnableWithMessageHistory
-
-from jupyter_ai.history import YChatHistory
-from jupyter_ai.personas import BasePersona, PersonaDefaults
-from jupyter_ai.personas.jupyternaut.prompt_template import JupyternautVariables
-
-from langchain.prompts import (
-    ChatPromptTemplate,
-    HumanMessagePromptTemplate,
-    MessagesPlaceholder,
-    SystemMessagePromptTemplate,
-)
-
-
-_SYSTEM_PROMPT_FORMAT = """
-<instructions>
-
-You are {{persona_name}}, an AI agent provided in JupyterLab through the 'Jupyter AI' extension.
-
-Jupyter AI is an installable software package listed on PyPI and Conda Forge as `jupyter-ai`.
-
-When installed, Jupyter AI adds a chat experience in JupyterLab that allows multiple users to collaborate with one or more agents like yourself.
-
-You are not a language model, but rather an AI agent powered by a foundation model `{{model_id}}`, provided by '{{provider_name}}'.
+from litellm import acompletion
 
-You are receiving a request from a user in JupyterLab. Your goal is to respond to user's query with emojis (:emoji: format) in response.
-
-You will receive any provided context and a relevant portion of the chat history.
-
-The user's request is located at the last message. Please fulfill the user's request to the best of your ability.
-</instructions>
-
-<context>
-{% if context %}The user has shared the following context:
-
-{{context}}
-{% else %}The user did not share any additional context.{% endif %}
-</context>
-""".strip()
+import emoji
 
-PROMPT_TEMPLATE = ChatPromptTemplate.from_messages(
-    [
-        SystemMessagePromptTemplate.from_template(
-            _SYSTEM_PROMPT_FORMAT, template_format="jinja2"
-        ),
-        MessagesPlaceholder(variable_name="history"),
-        HumanMessagePromptTemplate.from_template("{input}"),
-    ]
-)
 
 class EmojiPersona(BasePersona):
     """
@@ -60,44 +12,38 @@ class EmojiPersona(BasePersona):
 
     def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
 
 
     @property
     def defaults(self):
         return PersonaDefaults(
             name="EmojiPersona",
             avatar_path="/api/ai/static/jupyternaut.svg",
-            description="The emoji agent, that responds with emojis.",
-            system_prompt="...",
+            description="The emoji agent, always responds with emojis.",
+            system_prompt="You are an AI that responds to all user queries using only emojis. Do not use any words, only emojis. If you don't know how to respond with emojis, respond with a shrug emoji.",
         )
 
     async def process_message(self, message: Message):
-        provider_name = self.config_manager.lm_provider.name
-        model_id = self.config_manager.lm_provider_params["model_id"]
-
-        runnable = self.build_runnable()
-        variables = JupyternautVariables(
-            input=message.body,
-            model_id=model_id,
-            provider_name=provider_name,
-            persona_name=self.name,
-        )
-
-        variables_dict = variables.model_dump()
-        reply = runnable.invoke(variables_dict)
-        print(f"reply from model: {reply}")
-        reply = emoji.emojize(reply, variant="emoji_type")
-        print(f"reply after emojize: {reply}")
-        self.send_message(reply)
-
-    def build_runnable(self) -> Any:
-        llm = self.config_manager.lm_provider(**self.config_manager.lm_provider_params)
+        if not self.config_manager.chat_model:
+            self.send_message(
+                "No chat model is configured.\n\n"
+                "You must set one first in the Jupyter AI settings, found in 'Settings > AI Settings' from the menu bar."
+            )
+            return
 
-        runnable = PROMPT_TEMPLATE | llm | StrOutputParser()
-        runnable = RunnableWithMessageHistory(
-            runnable=runnable,  # type:ignore[arg-type]
-            get_session_history=lambda: YChatHistory(ychat=self.ychat, k=0),
-            input_messages_key="input",
-            history_messages_key="history",
+        model_id = self.config_manager.chat_model
+        model_args = self.config_manager.chat_model_args
+        message.body = message.body + "\n\nDecorate your reply with emojis."
+        response_aiter = await acompletion(
+            **model_args,
+            model=model_id,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.body,
+                },
+            ],
+            stream=False,
         )
-        return runnable
+        response_text = response_aiter.choices[0].message.content
+        emoji_text = emoji.emojize(response_text, variant="emoji_type")
+        self.send_message(emoji_text)
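For reference, the refactored flow reduces to a single litellm call followed by shortcode substitution. A minimal standalone sketch of that pattern, assuming provider credentials are set in the environment; the model id and the emojify helper name are placeholders of mine, not code from this PR:

import asyncio

import emoji
from litellm import acompletion


async def emojify(prompt: str) -> str:
    # Placeholder model id; any litellm-supported model works, and provider
    # credentials are assumed to be available in the environment.
    response = await acompletion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt + "\n\nDecorate your reply with emojis."}],
        stream=False,
    )
    text = response.choices[0].message.content
    # Convert any :shortcode: sequences in the reply into Unicode emoji.
    return emoji.emojize(text, variant="emoji_type")


if __name__ == "__main__":
    print(asyncio.run(emojify("Say hello!")))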
jupyter_ai_personas/finance_persona/persona.py (49 additions, 40 deletions)
@@ -2,14 +2,11 @@
 from pydantic import Field, BaseModel
 
 from jupyterlab_chat.models import Message
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables.history import RunnableWithMessageHistory
 from jupyter_core.paths import jupyter_data_dir
 
-from jupyter_ai.history import YChatHistory
 from jupyter_ai.personas import BasePersona, PersonaDefaults
-from jupyter_ai.personas.jupyternaut.prompt_template import JUPYTERNAUT_PROMPT_TEMPLATE, JupyternautVariables
-from jupyter_ai.config_manager import DEFAULT_CONFIG_PATH
+from litellm import acompletion
 
 from agno.agent import Agent
 from agno.models.aws import AwsBedrock
@@ -71,49 +68,61 @@
 
 
     async def process_message(self, message: Message):
-        provider_name = self.config_manager.lm_provider.name
-        model_id = self.config_manager.lm_provider_params["model_id"]
-
-        runnable = self.build_runnable()
-        variables = JupyternautVariables(
-            input=message.body,
-            model_id=model_id,
-            provider_name=provider_name,
-            persona_name=self.name,
-        )
-
-        # Check if the prompt is about finance. If so, pass on to agentic workflow, else use default handling
-        prompt = variables.input.split(" ", 1)[1]
-        llm = self.config_manager.lm_provider(**self.config_manager.lm_provider_params)
-        llm = llm.with_structured_output(
-            UserQueryClassifier,
+        if not self.config_manager.chat_model:
+            self.send_message(
+                "No chat model is configured.\n\n"
+                "You must set one first in the Jupyter AI settings, found in 'Settings > AI Settings' from the menu bar."
+            )
+            return
+
+        model_id = self.config_manager.chat_model
+        model_args = self.config_manager.chat_model_args
+
+        # Check if the prompt is about finance using litellm
+        prompt = message.body.split(" ", 1)[1]
+        response = await acompletion(
+            **model_args,
+            model=model_id,
+            messages=[
+                {
+                    "role": "system",
+                    "content": "You are a classifier that determines if a message is finance related. Respond with true or false."
+                },
+                {
+                    "role": "user",
+                    "content": prompt,
+                },
+            ],
+            stream=False,
         )
-        response = llm.invoke(prompt)  # Gets the full AI message response
+        is_finance = "true" in response.choices[0].message.content.lower()
 
-        # If the message is finance-related, proceed with default handling
-        if response.is_finance_related:  # type:ignore[union-attr]
-            msg = variables.input.split(" ", 1)[1].strip()
+        # If the message is finance-related, proceed with agentic handling
+        if is_finance:
+            msg = message.body.split(" ", 1)[1].strip()
             if msg:
                 # Call the agno_finance function to process the message
                 self.agno_finance(msg)
             else:
                 self.send_message("Error: Query failed. Please try again with a different query.")
-        else:  # If the message is not finance-related, use the default runnable
-            variables_dict = variables.model_dump()
-            reply_stream = runnable.astream(variables_dict)
-            await self.stream_message(reply_stream)
-
-    def build_runnable(self) -> Any:
-        # TODO: support model parameters. maybe we just add it to lm_provider_params in both 2.x and 3.x
-        llm = self.config_manager.lm_provider(**self.config_manager.lm_provider_params)
-        runnable = JUPYTERNAUT_PROMPT_TEMPLATE | llm | StrOutputParser()
-        runnable = RunnableWithMessageHistory(
-            runnable=runnable,  # type:ignore[arg-type]
-            get_session_history=lambda: YChatHistory(ychat=self.ychat, k=0),
-            input_messages_key="input",
-            history_messages_key="history",
-        )
-        return runnable
+        else:  # If the message is not finance-related, use litellm
+            response = await acompletion(
+                **model_args,
+                model=model_id,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "You are a helpful AI assistant."
+                    },
+                    {
+                        "role": "user",
+                        "content": message.body,
+                    },
+                ],
+                stream=False,
+            )
+            self.send_message(response.choices[0].message.content)
 
 
 # Use Agno to process financial prompts
 # Multi agent workflow to get stock prices and forecast them using ARIMA
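Two fragile spots in the new classifier path may be worth a follow-up: message.body.split(" ", 1)[1] raises IndexError when a user sends a bare @-mention with no prompt, and the substring check for "true" also matches replies such as "Untrue." A small hardening sketch; the helper names are suggestions of mine, not code from this PR:

import re


def strip_mention(body: str) -> str:
    # The persona assumes "@PersonaName <prompt>" and splits on the first
    # space; guard the split so a bare mention returns "" instead of
    # raising IndexError.
    parts = body.split(" ", 1)
    return parts[1].strip() if len(parts) > 1 else ""


def classifier_said_true(raw: str) -> bool:
    # Match "true" as a whole word so a reply like "Untrue." does not
    # register as a finance query by accident.
    return re.search(r"\btrue\b", raw.lower()) is not None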
pyproject.toml (4 additions, 3 deletions)
@@ -71,6 +71,7 @@ build-backend = "hatchling.build"
 [project.entry-points."jupyter_ai.personas"]
 finance_persona = "jupyter_ai_personas.finance_persona.persona:FinancePersona"
 emoji_persona = "jupyter_ai_personas.emoji_persona.persona:EmojiPersona"
-software_team_persona = "jupyter_ai_personas.software_team_persona.persona:SoftwareTeamPersona"
-data_analytics_persona = "jupyter_ai_personas.data_analytics_persona.persona:DataAnalyticsTeam"
-pr_review_persona = "jupyter_ai_personas.pr_review_persona.persona:PRReviewPersona"
+# Uncomment the personas below once they are refactored for litellm in Jupyter AI
+# software_team_persona = "jupyter_ai_personas.software_team_persona.persona:SoftwareTeamPersona"
+# data_analytics_persona = "jupyter_ai_personas.data_analytics_persona.persona:DataAnalyticsTeam"
+# pr_review_persona = "jupyter_ai_personas.pr_review_persona.persona:PRReviewPersona"
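Jupyter AI discovers personas through the jupyter_ai.personas entry-point group declared above, so a quick way to confirm the wiring after an editable install is to list that group. A sketch, assuming Python 3.10+ for the group= keyword:

from importlib.metadata import entry_points

# List every persona registered under the "jupyter_ai.personas" group;
# the commented-out personas above should no longer appear here.
for ep in entry_points(group="jupyter_ai.personas"):
    print(ep.name, "->", ep.value)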