|
| 1 | +""" |
| 2 | +Fast API interface for processing images through the localization and classification pipelines. |
| 3 | +""" |
| 4 | + |
| 5 | +import enum |
| 6 | +import time |
| 7 | + |
| 8 | +import fastapi |
| 9 | +import pydantic |
| 10 | +from rich import print |
| 11 | + |
| 12 | +from ..common.logs import logger # noqa: F401 |
| 13 | +from . import settings |
| 14 | +from .models.classification import ( |
| 15 | + APIMothClassifier, |
| 16 | + MothClassifierBinary, |
| 17 | + MothClassifierGlobal, |
| 18 | + MothClassifierPanama, |
| 19 | + MothClassifierPanama2024, |
| 20 | + MothClassifierQuebecVermont, |
| 21 | + MothClassifierTuringAnguilla, |
| 22 | + MothClassifierTuringCostaRica, |
| 23 | + MothClassifierUKDenmark, |
| 24 | +) |
| 25 | +from .models.localization import APIMothDetector |
| 26 | +from .schemas import Detection, SourceImage |
| 27 | + |
# Single FastAPI application instance; all routes below are registered on it.
app = fastapi.FastAPI()
| 29 | + |
| 30 | + |
class SourceImageRequest(pydantic.BaseModel):
    """Incoming reference to a source image to process (id + fetchable URL)."""

    # Unknown fields from callers are silently dropped rather than rejected.
    model_config = pydantic.ConfigDict(extra="ignore")

    # @TODO bring over new SourceImage & b64 validation from the lepsAI repo
    id: str   # caller-supplied identifier; also used to de-duplicate requests
    url: str  # location the image will be fetched from
    # b64: str | None = None
| 38 | + |
| 39 | + |
class SourceImageResponse(pydantic.BaseModel):
    """Echo of a processed source image (id + URL) returned to the caller."""

    # Extra attributes on the internal image objects are ignored on dump.
    model_config = pydantic.ConfigDict(extra="ignore")

    id: str
    url: str
| 45 | + |
| 46 | + |
# Mapping from the public pipeline slug (as sent in requests) to the
# classifier class that implements it.
PIPELINE_CHOICES = {
    "panama_moths_2023": MothClassifierPanama,
    "panama_moths_2024": MothClassifierPanama2024,
    "quebec_vermont_moths_2023": MothClassifierQuebecVermont,
    "uk_denmark_moths_2023": MothClassifierUKDenmark,
    "costa_rica_moths_turing_2024": MothClassifierTuringCostaRica,
    "anguilla_moths_turing_2024": MothClassifierTuringAnguilla,
    "global_moths_2024": MothClassifierGlobal,
}
# Identity mapping (slug -> slug) used as the enum member spec below.
# (Simplified from dict(zip(keys, list(keys))) — same result.)
_pipeline_choices = {slug: slug for slug in PIPELINE_CHOICES}


# Enum of valid pipeline slugs (name == value) so FastAPI/pydantic can
# validate the `pipeline` request field and render it in the docs.
PipelineChoice = enum.Enum("PipelineChoice", _pipeline_choices)
| 60 | + |
| 61 | + |
class PipelineRequest(pydantic.BaseModel):
    """Request body for /pipeline/process: which pipeline to run on which images."""

    pipeline: PipelineChoice  # must be one of the PIPELINE_CHOICES slugs
    source_images: list[SourceImageRequest]
| 65 | + |
| 66 | + |
class PipelineResponse(pydantic.BaseModel):
    """Response body for /pipeline/process: detections plus timing metadata."""

    pipeline: PipelineChoice      # echoes the requested pipeline
    total_time: float             # wall-clock seconds for detection + classification
    source_images: list[SourceImageResponse]
    detections: list[Detection]   # includes both moth and non-moth detections
| 72 | + |
| 73 | + |
@app.get("/")
async def root():
    """Redirect the bare root URL to the interactive API documentation."""
    docs_redirect = fastapi.responses.RedirectResponse("/docs")
    return docs_redirect
| 77 | + |
| 78 | + |
def _split_by_binary_label(detections, positive_label, negative_label):
    """Partition detections by the binary label of their FIRST classification.

    Only the first classification of each detection is inspected (the binary
    classifier emits a single classification per detection). Detections with
    no classifications, or whose first classification matches neither label,
    are dropped — this mirrors the original filtering behavior.

    Returns a (moth_detections, non_moth_detections) tuple of lists.
    """
    moth_detections = []
    non_moth_detections = []
    for detection in detections:
        if not detection.classifications:
            continue  # nothing to inspect; detection is dropped
        label = detection.classifications[0].classification
        if label == positive_label:
            moth_detections.append(detection)
        elif label == negative_label:
            non_moth_detections.append(detection)
    return moth_detections, non_moth_detections


@app.post("/pipeline/process")
@app.post("/pipeline/process/")  # trailing-slash variant so both URLs work
async def process(data: PipelineRequest) -> PipelineResponse:
    """Run the localization + classification pipeline on the requested images.

    Steps:
      1. De-duplicate incoming source images by id.
      2. Localize candidate detections with the moth detector.
      3. Run the binary moth/non-moth classifier over all detections.
      4. Send only the moth detections to the requested species classifier.
      5. Return all detections (species-classified moths + non-moths) with timing.
    """
    # Ensure that the source images are unique, filter out duplicates (last
    # occurrence of a duplicated id wins, as with the original dict build).
    source_images_index = {
        source_image.id: source_image for source_image in data.source_images
    }
    incoming_source_images = list(source_images_index.values())
    num_duplicates = len(data.source_images) - len(incoming_source_images)
    if num_duplicates:
        logger.warning(f"Removed {num_duplicates} duplicate source images")

    source_image_results = [
        SourceImageResponse(**image.model_dump()) for image in incoming_source_images
    ]
    source_images = [
        SourceImage(**image.model_dump()) for image in incoming_source_images
    ]

    start_time = time.time()
    detector = APIMothDetector(
        source_images=source_images,
        batch_size=settings.localization_batch_size,
        num_workers=settings.num_workers,
        # single=True if len(source_images) == 1 else False,
        single=True,  # @TODO solve issues with reading images in multiprocessing
    )
    detector_results = detector.run()
    num_pre_filter = len(detector_results)

    # Renamed from `filter` to avoid shadowing the builtin of the same name.
    binary_filter = MothClassifierBinary(
        source_images=source_images,
        detections=detector_results,
        batch_size=settings.classification_batch_size,
        num_workers=settings.num_workers,
        # single=True if len(detector_results) == 1 else False,
        single=True,  # @TODO solve issues with reading images in multiprocessing
        filter_results=False,  # Only save results with the positive_binary_label, @TODO make this configurable from request
    )
    binary_filter.run()

    # Compare num detections with num moth detections
    num_post_filter = len(binary_filter.results)
    logger.info(
        f"Binary classifier returned {num_post_filter} out of {num_pre_filter} detections"
    )

    # Filter results based on positive_binary_label
    moth_detections, non_moth_detections = _split_by_binary_label(
        binary_filter.results,
        positive_label=binary_filter.positive_binary_label,
        negative_label=binary_filter.negative_binary_label,
    )

    logger.info(
        f"Sending {len(moth_detections)} out of {num_pre_filter} detections to the classifier"
    )

    Classifier = PIPELINE_CHOICES[data.pipeline.value]
    classifier: APIMothClassifier = Classifier(
        source_images=source_images,
        detections=moth_detections,
        batch_size=settings.classification_batch_size,
        num_workers=settings.num_workers,
        # single=True if len(filtered_detections) == 1 else False,
        single=True,  # @TODO solve issues with reading images in multiprocessing
    )
    classifier.run()
    end_time = time.time()
    seconds_elapsed = float(end_time - start_time)

    # Return all detections, including those that were not classified as moths
    all_detections = classifier.results + non_moth_detections

    logger.info(
        f"Processed {len(source_images)} images in {seconds_elapsed:.2f} seconds"
    )
    logger.info(f"Returning {len(all_detections)} detections")
    print(all_detections)

    # If the number of detections is greater than 100, its suspicious. Log it.
    if len(all_detections) > 100:
        logger.warning(
            f"Detected {len(all_detections)} detections. This is suspicious and may contain duplicates."
        )

    response = PipelineResponse(
        pipeline=data.pipeline,
        source_images=source_image_results,
        detections=all_detections,
        total_time=seconds_elapsed,
    )
    return response
| 178 | + |
| 179 | + |
| 180 | +# Future methods |
| 181 | + |
| 182 | +# batch processing |
| 183 | +# async def process_batch(data: PipelineRequest) -> PipelineResponse: |
| 184 | +# pass |
| 185 | + |
| 186 | +# render image crops and bboxes on top of the original image |
| 187 | +# async def render(data: PipelineRequest) -> PipelineResponse: |
| 188 | +# pass |
| 189 | + |
| 190 | + |
if __name__ == "__main__":
    # Run a standalone development server when executed directly.
    import uvicorn

    uvicorn.run(app=app, host="0.0.0.0", port=2000)
0 commit comments