1 change: 1 addition & 0 deletions pyproject.toml
@@ -15,6 +15,7 @@ dependencies = [
     "elasticsearch>=9.1.0",
     "fastapi[standard]>=0.116.1",
     "foundry-local-sdk>=0.4.0",
+    "gtts>=2.5.4",
     "httpx>=0.28.1",
     "jinja2>=3.1.2",
     "langchain-azure-ai>=0.1.4",
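The only dependency added here is gtts, the gTTS (Google Text-to-Speech) client; its call site is not part of the hunks shown in this diff. A minimal usage sketch of gTTS, for orientation only, assuming the package backs a speech-output feature elsewhere in the PR:

```python
# Illustrative gTTS sketch; not taken from this PR's code.
from io import BytesIO

from gtts import gTTS

tts = gTTS("Hello from the agent", lang="en")  # text to synthesize
buffer = BytesIO()
tts.write_to_fp(buffer)  # writes MP3 bytes to the in-memory buffer
# buffer.getvalue() could then be played back, e.g. via st.audio(...)
```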
@@ -1,3 +1,5 @@
+from base64 import b64encode
+
 import streamlit as st
 from langchain_community.callbacks.streamlit import (
     StreamlitCallbackHandler,
@@ -9,6 +11,11 @@
 )
 from template_langgraph.tools.common import get_default_tools
 
+
+def image_to_base64(image_bytes: bytes) -> str:
+    return b64encode(image_bytes).decode("utf-8")
+
+
 if "chat_history" not in st.session_state:
     st.session_state["chat_history"] = []
 
@@ -43,16 +50,89 @@
 
 for msg in st.session_state["chat_history"]:
     if isinstance(msg, dict):
-        st.chat_message(msg["role"]).write(msg["content"])
+        attachments = msg.get("attachments", [])
+        with st.chat_message(msg["role"]):
+            if attachments:
+                for item in attachments:
+                    if item["type"] == "text":
+                        st.markdown(item["text"])
+                    elif item["type"] == "image_url":
+                        st.image(item["image_url"]["url"])
+            else:
+                st.write(msg["content"])
     else:
         st.chat_message("assistant").write(msg.content)
 
-if prompt := st.chat_input():
-    st.session_state["chat_history"].append({"role": "user", "content": prompt})
-    st.chat_message("user").write(prompt)
+if prompt := st.chat_input(
+    accept_file="multiple",
+    file_type=[
+        "png",
+        "jpg",
+        "jpeg",
+        "gif",
+        "webp",
+    ],
+):
+    user_display_items = []
+    message_parts = []
+
+    prompt_text = prompt if isinstance(prompt, str) else getattr(prompt, "text", "") or ""
+    prompt_files = [] if isinstance(prompt, str) else (getattr(prompt, "files", []) or [])
+
+    user_text = prompt_text
+    if user_text.strip():
+        user_display_items.append({"type": "text", "text": user_text})
+        message_parts.append(user_text)
+
+    has_unsupported_files = False
+    for file in prompt_files:
+        if file.type and file.type.startswith("image/"):
+            image_bytes = file.getvalue()
+            base64_image = image_to_base64(image_bytes)
+            image_url = f"data:{file.type};base64,{base64_image}"
+            user_display_items.append(
+                {
+                    "type": "image_url",
+                    "image_url": {"url": image_url},
+                }
+            )
+            message_parts.append(f"![image]({image_url})")
+        else:
+            has_unsupported_files = True
+
+    if has_unsupported_files:
+        st.warning("画像ファイル以外の添付は現在サポートされていません。")
+
+    message_content = "\n\n".join(message_parts).strip()
+    if not message_content:
+        message_content = "ユーザーが画像をアップロードしました。"
+
+    new_user_message = {"role": "user", "content": message_content}
+    if user_display_items:
+        new_user_message["attachments"] = user_display_items
+
+    st.session_state["chat_history"].append(new_user_message)
+
+    with st.chat_message("user"):
+        if user_display_items:
+            for item in user_display_items:
+                if item["type"] == "text":
+                    st.markdown(item["text"])
+                elif item["type"] == "image_url":
+                    st.image(item["image_url"]["url"])
+        else:
+            st.write(message_content)
+
+    graph_messages = []
+    for msg in st.session_state["chat_history"]:
+        if isinstance(msg, dict):
+            graph_messages.append({"role": msg["role"], "content": msg["content"]})
+        else:
+            graph_messages.append(msg)
+
     with st.chat_message("assistant"):
         response: AgentState = st.session_state["graph"].invoke(
-            {"messages": st.session_state["chat_history"]},
+            {"messages": graph_messages},
             {
                 "callbacks": [
                     StreamlitCallbackHandler(st.container()),
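For context, a minimal sketch of the data-URL round trip the new code relies on. It uses only the standard library; the helper mirrors image_to_base64 from the diff, and the sample bytes are placeholder data, not a real image:

```python
# Illustrative sketch of the image -> data URL conversion shown above.
from base64 import b64encode


def image_to_base64(image_bytes: bytes) -> str:
    return b64encode(image_bytes).decode("utf-8")


png_bytes = bytes.fromhex("89504e470d0a1a0a")  # PNG magic number only; placeholder, not a renderable image
image_url = f"data:image/png;base64,{image_to_base64(png_bytes)}"
# -> "data:image/png;base64,iVBORw0KGgo="
# The diff passes strings of this form both to st.image() and into markdown
# image syntax (![image](...)) when building the stored message content.
```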
21 changes: 18 additions & 3 deletions uv.lock
Some generated files are not rendered by default.