Skip to content

Commit d471ca7

Browse files
committed
created basic agent
1 parent 9bb5651 commit d471ca7

File tree

6 files changed

+101
-142
lines changed

6 files changed

+101
-142
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,3 +56,6 @@ build/
5656

5757
# Trash bin
5858
trash/
59+
60+
# Database
61+
db.sqlite3

0.2.27

Lines changed: 0 additions & 61 deletions
This file was deleted.

api/.pre-commit-config.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
repos:
22
- repo: https://github.com/charliermarsh/ruff-pre-commit
3-
rev: v0.5.0
3+
rev: v0.6.8
44
hooks:
55
- id: ruff
6+
args: ["--fix"]
67
- id: ruff-format
78

89
- repo: https://github.com/psf/black

api/llm/agent.py

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
import sqlite3
2+
from typing import List, Optional
3+
4+
from dotenv import load_dotenv
5+
from langchain_google_genai import ChatGoogleGenerativeAI
6+
from langgraph.checkpoint.sqlite import SqliteSaver
7+
from langgraph.prebuilt import create_react_agent
8+
9+
load_dotenv()
10+
11+
# Global agent instance
12+
_agent_executor = None
13+
_memory = None
14+
15+
16+
def get_agent():
    """Return the process-wide agent, creating it on first call.

    Lazily builds a Gemini-backed ReAct agent with a SQLite checkpointer
    so conversation state persists across calls and process restarts.

    Returns:
        The cached LangGraph agent executor.
    """
    global _agent_executor, _memory

    if _agent_executor is None:
        llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

        # Build persistent checkpointer. check_same_thread=False lets the
        # web server's worker threads share this single connection —
        # NOTE(review): SqliteSaver is assumed to serialize access; confirm.
        con = sqlite3.connect("db.sqlite3", check_same_thread=False)
        _memory = SqliteSaver(con)

        # Create agent. Implicit string concatenation replaces the original
        # backslash-continued literal, which embedded long runs of source
        # indentation inside the prompt text sent to the model.
        _agent_executor = create_react_agent(
            llm,
            [],  # no tools wired up yet
            prompt=(
                "You are a helpful AI image editing assistant. "
                "You help users with image editing tasks and provide "
                "guidance on how to modify their images."
            ),
            checkpointer=_memory,
        )

    return _agent_executor
38+
39+
40+
def chat_with_agent(
    message: str, user_id: str = "default", selected_images: Optional[List[str]] = None
) -> str:
    """
    Send a message to the agent and get a response.

    Args:
        message: The user's message.
        user_id: Unique identifier for the user/thread; used as the
            checkpointer thread ID so each user keeps their own history.
        selected_images: List of selected image names (optional); appended
            to the message as context for the model.

    Returns:
        The agent's response as a string, or a fallback apology when no
        usable message comes back.
    """
    agent = get_agent()

    # Append image-selection context so the model knows which images the
    # user is working on. Truthiness covers both None and the empty list —
    # the original `and len(...) > 0` was redundant.
    full_message = message
    if selected_images:
        full_message += f" Selected images: {', '.join(selected_images)}."

    # The thread ID keys the SQLite checkpointer, giving conversation
    # continuity per user.
    config = {"configurable": {"thread_id": user_id}}

    response = agent.invoke(
        {"messages": [{"role": "user", "content": full_message}]}, config=config
    )

    # The reply is the last message; it may be an AIMessage object or a
    # plain dict depending on the langgraph version.
    if response and response.get("messages"):
        last_message = response["messages"][-1]
        if hasattr(last_message, "content"):
            return last_message.content
        if isinstance(last_message, dict) and "content" in last_message:
            return last_message["content"]

    return "I'm sorry, I couldn't process your request. Please try again."
80+
81+
82+
if __name__ == "__main__":
    # Manual smoke test: send one message through the agent and show the reply.
    demo_reply = chat_with_agent("Hello! How can you help me with image editing?")
    print(demo_reply)

api/pyproject.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ dependencies = [
1919
"langchain-core",
2020
"langgraph>0.2.27",
2121
"langchain[google-genai]",
22+
"langgraph-checkpoint-sqlite",
2223
]
2324

2425
[project.optional-dependencies]
@@ -42,6 +43,7 @@ target-version = ["py310"]
4243
line-length = 88
4344
target-version = "py310"
4445
fix = true
46+
unsafe-fixes = true
4547

4648
[tool.ruff.lint]
4749
select = ["E", "F", "I"]

api/server/main.py

Lines changed: 9 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
from fastapi import FastAPI, HTTPException
44
from pydantic import BaseModel
55

6+
from llm.agent import chat_with_agent
7+
68
app = FastAPI(
79
title="AI Image Editor API",
810
description="API for AI-powered image editing assistant",
@@ -21,84 +23,6 @@ class ChatResponse(BaseModel):
2123
status: str = "success"
2224

2325

24-
# Template responses for different types of messages
25-
TEMPLATE_RESPONSES = {
26-
"greeting": [
27-
"Hello! I'm your AI image editing assistant. How can I help you today?",
28-
"Hi there! What would you like to work on?",
29-
"Welcome! I'm your AI assistant ready to help you transform your images.",
30-
],
31-
"image_selection": [
32-
"I can see you've selected some images. What would you like to do with them?",
33-
"Great choice! Those images look interesting. What kind of editing you need?",
34-
"Perfect! I can help you edit those selected images. What's your vision?",
35-
],
36-
"editing_request": [
37-
"I understand you want to edit your images. Let me help you with that!",
38-
"Great! I can assist you with image editing. What specific changes you need?",
39-
"Excellent! I'm ready to help you transform your images.",
40-
],
41-
"general_help": [
42-
"Just let me know what you'd like to do!",
43-
"Feel free to ask me anything about image editing.",
44-
],
45-
"upload": [
46-
"I see you've uploaded an image! What would you like to do with it?",
47-
"Great! I can help you edit that uploaded image.",
48-
"Perfect! I'm ready to work with your uploaded image. What's your vision?",
49-
],
50-
}
51-
52-
53-
def get_template_response(
54-
message: str, selected_images: Optional[List[str]] = None
55-
) -> str:
56-
"""Get a template response based on the message content and context."""
57-
message_lower = message.lower()
58-
59-
# Check for greetings
60-
if any(word in message_lower for word in ["hello", "hi", "hey", "greetings"]):
61-
import random
62-
63-
return random.choice(TEMPLATE_RESPONSES["greeting"])
64-
65-
# Check for image uploads
66-
if "uploaded" in message_lower or "📷" in message:
67-
import random
68-
69-
return random.choice(TEMPLATE_RESPONSES["upload"])
70-
71-
# Check for image selection context
72-
if selected_images and len(selected_images) > 0:
73-
import random
74-
75-
base_response = random.choice(TEMPLATE_RESPONSES["image_selection"])
76-
image_names = ", ".join(selected_images)
77-
return f"{base_response} I can see you've selected: {image_names}."
78-
79-
# Check for editing requests
80-
if any(
81-
word in message_lower
82-
for word in [
83-
"edit",
84-
"change",
85-
"modify",
86-
"transform",
87-
"enhance",
88-
"filter",
89-
"effect",
90-
]
91-
):
92-
import random
93-
94-
return random.choice(TEMPLATE_RESPONSES["editing_request"])
95-
96-
# Default response
97-
import random
98-
99-
return random.choice(TEMPLATE_RESPONSES["general_help"])
100-
101-
10226
@app.get("/")
10327
async def root():
10428
return {"message": "AI Image Editor API is running!"}
@@ -121,8 +45,13 @@ async def chat_endpoint(request: ChatRequest):
12145
ChatResponse with AI response and status.
12246
"""
12347
try:
124-
# Get template response based on message and context
125-
response = get_template_response(request.message, request.selected_images)
48+
# Use the LLM agent to get a response
49+
user_id = request.user_id or "default"
50+
response = chat_with_agent(
51+
message=request.message,
52+
user_id=user_id,
53+
selected_images=request.selected_images,
54+
)
12655

12756
return ChatResponse(response=response, status="success")
12857

0 commit comments

Comments
 (0)