|
| 1 | +import os |
| 2 | +from base64 import b64encode |
| 3 | + |
| 4 | +import httpx |
| 5 | +from langgraph.graph import StateGraph |
| 6 | +from langgraph.types import Send |
| 7 | + |
| 8 | +from template_langgraph.agents.image_classifier_agent.models import ( |
| 9 | + AgentState, |
| 10 | + ClassifyImageState, |
| 11 | + Result, |
| 12 | + Results, |
| 13 | +) |
| 14 | +from template_langgraph.llms.azure_openais import AzureOpenAiWrapper |
| 15 | +from template_langgraph.loggers import get_logger |
| 16 | + |
| 17 | +logger = get_logger(__name__) |
| 18 | + |
| 19 | + |
def load_image_to_base64(image_path: str) -> str:
    """Read the file at *image_path* and return its contents Base64-encoded.

    Args:
        image_path: Path to a local image file, opened in binary mode.

    Returns:
        The Base64 encoding of the file's bytes as an ASCII ``str``.
    """
    with open(image_path, "rb") as fp:
        raw = fp.read()
    return b64encode(raw).decode("utf-8")
| 23 | + |
| 24 | + |
class MockNotifier:
    """No-op notifier used in tests/demos: logs instead of delivering anything."""

    # NOTE: the parameter name `id` shadows the builtin, but it is part of the
    # keyword-call interface (callers invoke `notify(id=..., body=...)`), so it
    # cannot be renamed without breaking them.
    def notify(self, id: str, body: dict) -> None:
        """Pretend to send a notification by writing it to the module logger."""
        logger.info(f"Notification sent for request {id}: {body}")
| 29 | + |
| 30 | + |
class MockClassifier:
    """Stand-in classifier returning a canned ``Result`` without any LLM call."""

    def predict(
        self,
        prompt: str,
        image: str,
        llm=None,  # accepted only for interface parity with LlmClassifier; unused
    ) -> Result:
        """Simulate image classification.

        Args:
            prompt: Instruction text (echoed into the mocked summary).
            image: Base64-encoded image payload (ignored by the mock).
            llm: Ignored. Previously defaulted to ``AzureOpenAiWrapper().chat_model``,
                which was evaluated once at import time and constructed a real
                Azure client even though the mock never uses it; a lazy ``None``
                default avoids that side effect.

        Returns:
            A fixed ``Result`` whose summary embeds *prompt*.
        """
        return Result(
            title="Mocked Image Title",
            summary=f"Mocked summary of the prompt: {prompt}",
            labels=["mocked_label_1", "mocked_label_2"],
            reliability=0.95,
        )
| 45 | + |
| 46 | + |
class LlmClassifier:
    """Classifier that delegates to a multimodal chat model with structured output."""

    def predict(
        self,
        prompt: str,
        image: str,
        llm=None,
    ) -> Result:
        """Use the LLM to classify the image.

        Args:
            prompt: Instruction text sent alongside the image.
            image: Base64-encoded image data (sent as ``image/png`` — callers
                must supply PNG-compatible payloads).
            llm: Chat model to use. Created lazily when omitted: the original
                default ``AzureOpenAiWrapper().chat_model`` was evaluated once
                at import time, instantiating an Azure client as a module
                import side effect.

        Returns:
            The model's answer parsed into a ``Result`` via structured output.
        """
        if llm is None:
            llm = AzureOpenAiWrapper().chat_model
        logger.info(f"Classifying image with LLM: {prompt}")
        return llm.with_structured_output(Result).invoke(
            input=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": prompt,
                        },
                        {
                            "type": "image",
                            "source_type": "base64",
                            "data": image,
                            "mime_type": "image/png",
                        },
                    ],
                },
            ]
        )
| 75 | + |
| 76 | + |
class ImageClassifierAgent:
    """LangGraph agent that fans image files out to a classifier and notifies.

    Workflow: ``initialize`` -> (one ``classify_image`` task per input file,
    dispatched via ``Send``) -> ``notify``.
    """

    def __init__(
        self,
        llm=None,
        notifier=None,
        classifier=None,
    ):
        """Wire up collaborators.

        Defaults are created lazily: the previous signature evaluated
        ``AzureOpenAiWrapper().chat_model`` / ``MockNotifier()`` /
        ``MockClassifier()`` once at import time, constructing an Azure client
        as a module side effect and sharing single default instances across
        every agent.

        Args:
            llm: Chat model passed through to the classifier; defaults to a
                freshly created Azure chat model.
            notifier: Object with a ``notify(id, body)`` method; defaults to
                ``MockNotifier``.
            classifier: Object with a ``predict(prompt, image, llm)`` method;
                defaults to ``MockClassifier``.
        """
        self.llm = llm if llm is not None else AzureOpenAiWrapper().chat_model
        self.notifier = notifier if notifier is not None else MockNotifier()
        self.classifier = classifier if classifier is not None else MockClassifier()

    def create_graph(self):
        """Create and compile the agent's state graph."""
        workflow = StateGraph(AgentState)

        # Nodes
        workflow.add_node("initialize", self.initialize)
        workflow.add_node("classify_image", self.classify_image)
        workflow.add_node("notify", self.notify)

        # Edges: initialize fans out to classify_image via Send, then notify.
        workflow.set_entry_point("initialize")
        workflow.add_conditional_edges(
            source="initialize",
            path=self.run_subtasks,
            path_map={
                "classify_image": "classify_image",
            },
        )
        workflow.add_edge("classify_image", "notify")
        workflow.set_finish_point("notify")
        return workflow.compile(
            name=ImageClassifierAgent.__name__,
        )

    def initialize(self, state: AgentState) -> AgentState:
        """Initialize the agent state (currently a logged pass-through)."""
        logger.info(f"Initializing state: {state}")
        # FIXME: retrieve urls from user request
        return state

    def run_subtasks(self, state: AgentState) -> list[Send]:
        """Emit one ``classify_image`` Send per input file path."""
        logger.info(f"Running subtasks with state: {state}")
        return [
            Send(
                node="classify_image",
                arg=ClassifyImageState(
                    prompt=state.input.prompt,
                    file_path=file_path,
                ),
            )
            for file_path in state.input.file_paths
        ]

    def classify_image(self, state: ClassifyImageState):
        """Classify a single image file and append the outcome to ``results``.

        Returns ``None`` (no state update) for paths that are not existing
        PNG/JPEG files, or when the classifier call fails.
        """
        logger.info(f"Classify file: {state.file_path}")
        if state.file_path.endswith((".png", ".jpg", ".jpeg")) and os.path.isfile(state.file_path):
            try:
                logger.info(f"Loading file: {state.file_path}")
                base64_image = load_image_to_base64(state.file_path)

                logger.info(f"Classifying file: {state.file_path}")
                result = self.classifier.predict(
                    prompt=state.prompt,
                    image=base64_image,
                    llm=self.llm,
                )

                logger.info(f"Classification result: {result.model_dump_json(indent=2)}")
                return {
                    "results": [
                        Results(
                            file_path=state.file_path,
                            result=result,
                        ),
                    ]
                }
            except httpx.RequestError as e:
                # Message previously said "Error fetching web content" — a
                # copy-paste leftover; this node classifies local images.
                logger.error(f"Error classifying image {state.file_path}: {e}")

    def notify(self, state: AgentState) -> AgentState:
        """Send a summary of all classification results to the notifier."""
        logger.info(f"Sending notifications with state: {state}")
        summary = {}
        for i, result in enumerate(state.results):
            summary[i] = result.model_dump()
        self.notifier.notify(
            id=state.input.id,
            body=summary,
        )
        return state
| 171 | + |
| 172 | + |
# For testing with all-mock collaborators:
# graph = ImageClassifierAgent().create_graph()

# Module-level compiled graph exported for the LangGraph runtime, wired with
# the real LLM-backed classifier and the mock notifier.
_agent = ImageClassifierAgent(
    llm=AzureOpenAiWrapper().chat_model,
    notifier=MockNotifier(),
    classifier=LlmClassifier(),
)
graph = _agent.create_graph()
0 commit comments