from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse, FileResponse
import os
from pathlib import Path
import uvicorn
# Add root to sys path
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))

# Scripts
from filesystem import make_dir, file_from_bytes
from detect import detect

app = FastAPI()

images = ('bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp')
videos = ('asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv')
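
# `images` and `videos` list the supported input extensions but are not referenced
# elsewhere in this module. A minimal sketch of how they could gate uploads before
# detection (the helper name `allowed_file` is illustrative, not part of the original code):
def allowed_file(filename: str) -> bool:
    '''Return True if the filename has a supported image or video extension.'''
    ext = filename.rsplit('.', 1)[-1].lower()
    return ext in images or ext in videos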

# Detect endpoint
@app.post("/detect/")
async def create_files(file: UploadFile = File(...)):
    '''
    Receives an uploaded image or video, saves it to the model's temporary upload
    directory, runs YOLOv5 detection on it and returns the annotated result.
    '''
    # Create the uploads directory if it does not exist
    dir_path = make_dir(dir_path=f'{os.getenv("UPLOADS_PATH")}/model/tmp/')

    # Create the outputs directory if it does not exist
    output_path = make_dir(dir_path=f'{os.getenv("MODEL_OUTPUTS")}', use_base_path=False)

    # Save the uploaded file to the uploads directory
    contents = await file.read()
    with open(os.path.join(dir_path, file.filename), "wb") as fp:
        fp.write(contents)

    # Resolve the YOLOv5 root directory and make it importable
    FILE = Path(__file__).resolve()
    ROOT = FILE.parents[0]  # YOLOv5 root directory
    if str(ROOT) not in sys.path:
        sys.path.append(str(ROOT))  # add ROOT to PATH
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
    # YOLOv5 config dict
    config = {
        'weights': f'{ROOT}/yolov5s.pt',
        'source': f'{dir_path}/{file.filename}',
        'device': 'cpu',  # or gpu number: 0, 1, 2, 3
        'view_img': False,
        'classes': None,
        'update': False,
        'nosave': False,
        'project': f'{ROOT}/{output_path}',
        'name': 'exp',
        'exist_ok': False,
        'dnn': False,
        'data': f'{ROOT}/data/coco128.yaml',
        'half': False,
        'imgsz': (640, 640),
        'visualize': False,
        'augment': False,
        'conf_thres': 0.25,
        'iou_thres': 0.45,
        'agnostic_nms': False,
        'max_det': 1000,
        'save_crop': False,
        'line_thickness': 3,
        'save_txt': False,
        'hide_labels': False,
        'hide_conf': False,
        'save_conf': False,
    }
    # Run YOLOv5 object detection
    detect(config)

    # Directory holding the annotated outputs
    output_path = make_dir(dir_path=f'{os.getenv("MODEL_OUTPUTS")}')
    # If the output is a video, return it as a download; otherwise return the annotated image
    for file_name in os.listdir(output_path):
        if file_name.split('.')[-1] == 'mp4':
            response = FileResponse(f'{output_path}/{file_name}', media_type='video/mp4')
            response.headers["Content-Disposition"] = "attachment; filename=results.mp4"
            return response
    return FileResponse(os.path.join(output_path, file.filename))
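
# Example client call (a minimal sketch, assuming the server is running locally on the
# port configured below; `requests` is not a dependency of this module and the file
# names are illustrative):
#
#   import requests
#   with open('example.jpg', 'rb') as f:
#       resp = requests.post('http://localhost:5051/detect/', files={'file': f})
#   with open('results.jpg', 'wb') as out:
#       out.write(resp.content)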

# Home endpoint, returns simple html
@app.get("/")
async def main():
    content = """
<!-- Font Awesome -->
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.8.2/css/all.css">
<!-- Google Fonts -->
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap">
<!-- Bootstrap core CSS -->
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.5.0/css/bootstrap.min.css" rel="stylesheet">
<!-- Material Design Bootstrap -->
<link href="https://cdnjs.cloudflare.com/ajax/libs/mdbootstrap/4.19.1/css/mdb.min.css" rel="stylesheet">
<body>
<form class="text-center border border-light p-5" action="/detect/" enctype="multipart/form-data" method="post">
<h1>Detect Objects in Images</h1>
<p>This demo uses YOLOv5 and FastAPI</p>
<div>
    <label for="files" class="btn">Select Image</label>
    <input name="file" id="files" style="visibility:hidden;" type="file">
    <label for="btnUpload" class="btn btn-primary">Detect Objects!</label>
    <input name="btnUpload" id="btnUpload" style="visibility:hidden;" type="submit">
</div>
</form>
</body>
    """
    return HTMLResponse(content=content)

if __name__ == "__main__":
    # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210
    # colors = [tuple([random.randint(0, 255) for _ in range(3)]) for _ in range(100)]  # for bbox plotting
    uvicorn.run("deploy_server:app", host='0.0.0.0', port=5051, reload=True)

# --- Alternative torch.hub-based implementation, kept commented out for reference ---

# from pathlib import Path
# from fastapi import FastAPI, UploadFile, File, Response, Header
# from numpy import size
# from pydantic import AnyUrl
# from starlette.responses import StreamingResponse, FileResponse
# from PIL import Image
# import io
# import uvicorn
# import torch
# import base64
# import cv2
# import random
# import aiofiles


# app = FastAPI(title='YOLOv5 inference Server')
# DETECTION_URL = "/inference/"


# def get_yolov5():
#     # model = torch.hub.load('./yolov5', 'custom', path='./model/best.pt', source='local')
#     torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
#     model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True)  # force_reload to recache

#     return model


# model = get_yolov5()


# @app.post(
#     DETECTION_URL + 'image',
#     responses={
#         200: {
#             "content": {"image/png": {}},
#         }
#     },
#     response_class=Response,
# )
# async def predict(files: list[UploadFile] = File(...)):
#     for file in files:
#         results = model(Image.open(io.BytesIO(await file.read())), size=640)
#         results.render()

#         for im in results.imgs:
#             buffered = io.BytesIO()
#             im_base64 = Image.fromarray(im)
#             im_base64.save(buffered, format="JPEG")
#             # buffered.seek(0)
#             return Response(content=buffered.getvalue(), media_type="image/png")


# @app.post(DETECTION_URL + 'upload_video')
# async def upload_video(file: UploadFile = File(...)):
#     async with aiofiles.open(Path("./input_video.mp4"), 'wb') as out_file:
#         content = await file.read()  # async read
#         await out_file.write(content)  # async write
#     _handle_inference_video()


# @app.get(DETECTION_URL + 'video')
# async def predict_video():
#     response = FileResponse("./input_video.mp4", media_type='video/mp4')
#     response.headers["Content-Disposition"] = "attachment; filename=video.mp4"
#     return response


# def _handle_inference_video():
#     cap = cv2.VideoCapture('input_video.mp4')
#     # frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#     ret_val, img0 = cap.read()
#     while not ret_val:
#         count += 1
#         cap.release()
#         if count == nf:  # last video
#             raise StopIteration
#         path = files[count]
#         new_video(path)
#         ret_val, img0 = cap.read()

#     frame += 1
#     s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '


# results = model('input_video.mp4', size=320)
# results.save(save_dir='runs/detect/exp')


# def results_to_json(results, model):
#     ''' Converts yolo model output to json (list of list of dicts)'''
#     return [
#         [
#             {
#                 "class": int(pred[5]),
#                 "class_name": model.model.names[int(pred[5])],
#                 "bbox": [int(x) for x in pred[:4].tolist()],  # convert bbox results to int from float
#                 "confidence": float(pred[4]),
#             }
#             for pred in result
#         ]
#         for result in results.xyxy
#     ]


# def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3):
#     # Directly copied from: https://github.com/ultralytics/yolov5/blob/cd540d8625bba8a05329ede3522046ee53eb349d/utils/plots.py
#     # Plots one bounding box on image 'im' using OpenCV
#     assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
#     tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1  # line/font thickness
#     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
#     cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
#     if label:
#         tf = max(tl - 1, 1)  # font thickness
#         t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
#         c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
#         cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
#         cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


# def base64EncodeImage(img):
#     ''' Takes an input image and returns a base64 encoded string representation of that image (jpg format)'''
#     _, im_arr = cv2.imencode('.jpg', img)
#     im_b64 = base64.b64encode(im_arr.tobytes()).decode('utf-8')

#     return im_b64


# if __name__ == "__main__":
#     # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210
#     # colors = [tuple([random.randint(0, 255) for _ in range(3)]) for _ in range(100)]  # for bbox plotting
#     uvicorn.run("deploy_server:app", host='127.0.0.1', port=7777, reload=True)