11import logging
22from contextlib import asynccontextmanager
33from fastapi import FastAPI
4+ from fastapi .responses import JSONResponse
45from helpers import save_document
56from request_models import CreateSessionRequest , PromptRequest , SummaryRequest , QuizRequest , FlashcardRequest
67from llm import StudyLLM
8+ from prometheus_fastapi_instrumentator import Instrumentator
79
810# Configure root logging once at import time; module loggers (logging.getLogger(__name__)) inherit INFO level.
911logging .basicConfig (level = logging .INFO )
@@ -39,9 +41,16 @@ async def lifespan(_):
3941 lifespan = lifespan
4042)
4143
# Expose Prometheus metrics for this FastAPI app.
# should_group_status_codes=False keeps individual status codes (e.g. 200 vs 201)
# as distinct label values; the /metrics endpoint itself is excluded so that
# scraping does not inflate its own request counters.
instrumentator = Instrumentator(
    should_group_status_codes=False,
    excluded_handlers=['/metrics'],
)
instrumentator.instrument(app)
instrumentator.expose(app)
48+
49+
4250# llm_instances["dummy"] = StudyLLM("./documents/example/W07_Microservices_and_Scalable_Architectures.pdf") # TODO: remove
4351# llm_instances["dummy2"] = StudyLLM("./documents/example/dummy_knowledge.txt") # TODO: remove
4452
53+ # Auxiliary Endpoints
4554@app .get ("/health" )
4655async def health_check ():
4756 """Check the health of the service and its dependencies."""
@@ -51,6 +60,7 @@ async def health_check():
5160 return {"status" : "unhealthy" , "error" : str (e )}
5261
5362
63+ # AI Tasks Endpoints
5464@app .post ("/session/load" )
5565async def load_session (data : CreateSessionRequest ):
5666 """
@@ -82,7 +92,7 @@ async def receive_prompt(data: PromptRequest):
8292 if data .session_id not in llm_instances :
8393 error_msg = f"Session { data .session_id } not found. Please ensure the document was processed successfully."
8494 logger .error (error_msg )
85- return {"response" : f"ERROR: { error_msg } " }
95+ return JSONResponse ( status_code = 404 , content = {"response" : f"ERROR: { error_msg } " })
8696
8797 logger .info (f"Processing chat request for session { data .session_id } " )
8898 response = llm_instances [data .session_id ].prompt (data .message )
0 commit comments