-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: api_server.py
More file actions
266 lines (213 loc) · 8.16 KB
/
api_server.py
File metadata and controls
266 lines (213 loc) · 8.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
"""
FastAPI Server for Semantic Router
Middleware API that receives data from Raspberry Pi and routes to appropriate services
Uses Google Gemini API for intelligent routing
"""
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional
import base64
import logging
from semantic_router import SemanticRouter, RouterConfig, create_router_from_env
# Configure logging: module-level logger named after this module so records
# can be filtered per-file by the root logging config.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Initialize FastAPI app (metadata shows up in the auto-generated /docs UI)
app = FastAPI(
title="PerceptEye Semantic Router",
description="Intelligent routing middleware using Google Gemini API",
version="1.0.0"
)
# Add CORS middleware for Raspberry Pi communication
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],  # Configure appropriately for production
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Initialize the semantic router once at import time; reads its settings
# (API keys, endpoints) from environment variables inside the factory.
router = create_router_from_env()
class FrameAnalysisRequest(BaseModel):
    """Payload accepted by the analysis/routing endpoints.

    Every field is optional; callers send whichever inputs they captured.
    """
    # Base64-encoded camera frame (the usual transport from the Pi).
    image_base64: Optional[str] = None
    # Free-text transcription/description of any captured audio.
    audio_description: Optional[str] = None
    # Alternative to image_base64: a fetchable URL pointing at the frame.
    image_url: Optional[str] = None
class FrameAnalysisResponse(BaseModel):
    """Routing decision returned by /analyze."""
    # Name of the chosen downstream route.
    route: str
    # Model confidence in the chosen route.
    confidence: float
    # Human-readable explanation of why this route was selected.
    reasoning: str
    # True when the router failed and the decision is a fallback.
    error: bool = False
@app.get("/")
async def root():
    """Service banner: identifies the router and confirms it is running."""
    banner = {
        "service": "PerceptEye Semantic Router",
        "status": "running",
        "version": "1.0.0",
    }
    return banner
@app.get("/health")
async def health_check():
    """Liveness probe for orchestrators / the Raspberry Pi client."""
    status = {"status": "healthy", "router": "operational"}
    return status
@app.post("/analyze", response_model=FrameAnalysisResponse)
async def analyze_frame(request: FrameAnalysisRequest):
    """
    Analyze a video frame and optional audio to determine routing decision.

    Args:
        request: FrameAnalysisRequest containing image and/or audio data

    Returns:
        Routing decision with confidence score (FrameAnalysisResponse shape).

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        logger.info("Received frame analysis request")
        result = router.analyze_frame(
            image_base64=request.image_base64,
            audio_description=request.audio_description,
            image_url=request.image_url,
        )
        return result
    except Exception as e:
        # logger.exception records the full traceback, which the previous
        # f-string message discarded; `from e` preserves the causal chain.
        logger.exception("Error in analyze endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/route")
async def route_frame(request: FrameAnalysisRequest):
    """
    Analyze frame, determine route, and call the appropriate API endpoint.

    Args:
        request: FrameAnalysisRequest containing image and/or audio data

    Returns:
        Complete routing decision and downstream API response.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        logger.info("Received routing request")
        result = router.route_and_call_api(
            image_base64=request.image_base64,
            audio_description=request.audio_description,
            image_url=request.image_url,
        )
        return result
    except Exception as e:
        # Log with traceback and keep the original exception as the cause.
        logger.exception("Error in route endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/route/upload")
async def route_frame_upload(
    image: Optional[UploadFile] = File(None),
    audio: Optional[UploadFile] = File(None),
    audio_description: Optional[str] = Form(None)
):
    """
    Upload files directly for routing (alternative to base64).

    Args:
        image: Image file from camera
        audio: Audio file
        audio_description: Text description of audio

    Returns:
        Complete routing decision and downstream API response.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        logger.info("Received file upload routing request")
        # Explicit `is not None` — UploadFile truthiness can be False for an
        # upload with an empty filename, which would silently drop the file.
        image_base64 = None
        if image is not None:
            image_bytes = await image.read()
            image_base64 = base64.b64encode(image_bytes).decode('utf-8')
        audio_bytes = None
        if audio is not None:
            audio_bytes = await audio.read()
        result = router.route_and_call_api(
            image_base64=image_base64,
            audio_data=audio_bytes,
            audio_description=audio_description,
        )
        return result
    except Exception as e:
        # Traceback-preserving log plus explicit exception chaining.
        logger.exception("Error in upload route endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/route/face-recognition")
async def force_route_face_recognition(request: FrameAnalysisRequest):
    """Force route to face recognition + TTS API (bypass semantic routing).

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        result = router._call_face_recognition_tts_api(
            request.image_base64,
            None,
            request.audio_description
        )
        return {"api_response": result, "status": "success"}
    except Exception as e:
        # Previously the error was re-raised without any logging; record the
        # traceback for parity with the other handlers and chain the cause.
        logger.exception("Error in face-recognition route endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/route/speech")
async def force_route_speech(request: FrameAnalysisRequest):
    """DEPRECATED: Use /route/face-recognition instead. Speech is now part of Face Recognition + TTS API"""
    try:
        # Forward only the audio description; image and audio bytes are None.
        api_result = router._call_face_recognition_tts_api(
            None, None, request.audio_description
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"api_response": api_result, "status": "success"}
@app.post("/route/people")
async def force_route_people(request: FrameAnalysisRequest):
    """DEPRECATED: Use /route/face-recognition instead. People recognition is now part of Face Recognition + TTS API"""
    try:
        # Forward only the image; audio bytes and description are None.
        api_result = router._call_face_recognition_tts_api(
            request.image_base64, None, None
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"api_response": api_result, "status": "success"}
@app.post("/route/sign-language")
async def force_route_sign_language(request: FrameAnalysisRequest):
    """Force route to sign language API (bypass semantic routing).

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        result = router._call_sign_language_api(request.image_base64)
        return {"api_response": result, "status": "success"}
    except Exception as e:
        # Log the traceback (previously silent) and chain the cause.
        logger.exception("Error in sign-language route endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/route/scene-description")
async def force_route_scene_description(request: FrameAnalysisRequest):
    """Force route to scene description (bypass semantic routing).

    Uses Gemini to provide detailed scene description for visually impaired
    users. Useful when you want to know what objects are in the environment
    without face or sign language detection.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # Get the full routing response which includes scene description
        result = router.analyze_frame(
            image_base64=request.image_base64,
            audio_description=request.audio_description,
            image_url=request.image_url,
        )
        if result.get("route") == "scene_description":
            # Extract scene description fields if present
            api_response = {
                "scene_description": result.get("scene_description", ""),
                "objects_detected": result.get("objects_detected", []),
                "spatial_info": result.get("spatial_info", ""),
                "text_detected": result.get("text_detected"),
                "safety_warnings": result.get("safety_warnings", [])
            }
        else:
            # Gemini routed elsewhere: fall back to its reasoning text so the
            # caller still gets *some* description instead of an empty body.
            api_response = {
                "scene_description": result.get("reasoning", "Unable to describe scene"),
                "objects_detected": [],
                "spatial_info": "",
                "text_detected": None,
                "safety_warnings": []
            }
        return {"api_response": api_response, "status": "success"}
    except Exception as e:
        # Log the traceback (previously silent) and chain the cause.
        logger.exception("Error in scene-description route endpoint")
        raise HTTPException(status_code=500, detail=str(e)) from e
if __name__ == "__main__":
    # Script entry point: serve on all interfaces so the Raspberry Pi on the
    # local network can reach the router.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8001, log_level="info")