|
10 | 10 | import torch |
11 | 11 |
|
12 | 12 | from ..logger import get_logger |
13 | | -from ..core.app import model_manager, request_count, start_time |
| 13 | +from ..logger.logger import get_request_count, get_uptime_seconds |
| 14 | +from ..core.app import model_manager, start_time |
14 | 15 | from ..ui.banners import print_system_resources |
15 | 16 | from ..config import system_instructions |
16 | 17 |
|
@@ -50,6 +51,24 @@ def get_gpu_memory() -> Optional[Tuple[int, int]]: |
50 | 51 | return None |
51 | 52 |
|
52 | 53 |
|
def get_gpu_info() -> Optional[Dict[str, Any]]:
    """Get detailed GPU information including memory and device name.

    Returns:
        A dict with ``total_memory``, ``free_memory``, ``used_memory``
        (units as reported by :func:`get_gpu_memory`) and ``device`` (the
        CUDA device name), or ``None`` when memory info is unavailable or
        any CUDA call fails.
    """
    try:
        gpu_mem = get_gpu_memory()
        if gpu_mem:
            total_gpu, free_gpu = gpu_mem
            return {
                "total_memory": total_gpu,
                "free_memory": free_gpu,
                "used_memory": total_gpu - free_gpu,
                # NOTE(review): hard-coded device index 0 — assumes a
                # single-GPU host; confirm behavior on multi-GPU setups.
                "device": torch.cuda.get_device_name(0),
            }
        return None
    except Exception as e:
        # Lazy %-formatting: the message is only built if DEBUG is enabled.
        logger.debug("Failed to get GPU info: %s", e)
        return None
| 70 | + |
| 71 | + |
53 | 72 | @router.post("/system/instructions") |
54 | 73 | async def update_system_instructions(request: SystemInstructionsRequest) -> Dict[str, str]: |
55 | 74 | """Update system instructions""" |
@@ -84,35 +103,32 @@ async def reset_system_instructions(model_id: Optional[str] = None) -> Dict[str, |
84 | 103 |
|
85 | 104 |
|
@router.get("/system/info", response_model=SystemInfoResponse)
async def get_system_info() -> SystemInfoResponse:
    """Get system information including CPU, memory, GPU usage, and server stats.

    Returns:
        SystemInfoResponse with CPU/memory utilization percentages, optional
        GPU details, the currently active model, server uptime in seconds,
        and the total request count tracked by the logger module.

    Raises:
        HTTPException: 500 if any of the underlying probes fail.
    """
    try:
        # Host-level usage snapshot (psutil percentages, 0-100).
        cpu_percent = psutil.cpu_percent()
        memory_percent = psutil.virtual_memory().percent

        # GPU details only when CUDA is present; None otherwise.
        gpu_info = get_gpu_info() if torch.cuda.is_available() else None

        return SystemInfoResponse(
            cpu_usage=cpu_percent,
            memory_usage=memory_percent,
            gpu_info=gpu_info,
            active_model=model_manager.current_model,
            uptime=time.time() - start_time,
            request_count=get_request_count(),  # Use the function from logger.logger instead
        )
    except Exception as e:
        # Lazy %-formatting avoids building the message eagerly; the client
        # sees the same detail text as before.
        logger.error("Error getting system info: %s", e)
        raise HTTPException(status_code=500, detail=f"Error getting system info: {str(e)}")
116 | 132 |
|
117 | 133 |
|
118 | 134 | @router.get("/health") |
|
0 commit comments