 from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
 from minio import Minio
 from pika.adapters.blocking_connection import BlockingChannel
+import aio_pika
+from aio_pika.abc import AbstractChannel
 
 router = APIRouter()
 security = HTTPBearer()
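The routes below obtain this channel via `Depends(dependencies.get_rabbitmq)`. A minimal sketch of what a dependency returning an `AbstractChannel` could look like, assuming one robust connection shared per process; the real `get_rabbitmq` lives in `dependencies.py`, and the URL and caching strategy here are assumptions, not part of this commit:

```python
from typing import Optional

import aio_pika
from aio_pika.abc import AbstractChannel, AbstractRobustConnection

_connection: Optional[AbstractRobustConnection] = None


async def get_rabbitmq() -> AbstractChannel:
    # Lazily open one robust (auto-reconnecting) connection
    # and hand out channels from it.
    global _connection
    if _connection is None or _connection.is_closed:
        _connection = await aio_pika.connect_robust("amqp://guest:guest@localhost/")
    return await _connection.channel()
```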
@@ -49,7 +51,7 @@ def default(self, obj):
 
 async def _resubmit_file_extractors(
     file: FileOut,
-    rabbitmq_client: BlockingChannel,
+    rabbitmq_client: AbstractChannel,
     user: UserOut,
     credentials: HTTPAuthorizationCredentials = Security(security),
 ):
@@ -94,7 +96,7 @@ async def add_file_entry(
     user: UserOut,
     fs: Minio,
     es: Elasticsearch,
-    rabbitmq_client: BlockingChannel,
+    rabbitmq_client: AbstractChannel,
     file: Optional[io.BytesIO] = None,
     content_type: Optional[str] = None,
     public: bool = False,
@@ -146,22 +148,28 @@ async def add_file_entry(
 
     # Publish a message when indexing is complete
 
+
+    # FIXED: Use aio_pika publishing
     message_body = {
         "event_type": "file_indexed",
         "file_data": json.loads(new_file.json()),
-        "user": json.loads(user.json()),  # This handles ObjectID serialization
+        "user": json.loads(user.json()),
         "timestamp": datetime.now().isoformat()
     }
 
-    rabbitmq_client.basic_publish(
-        exchange='clowder',
-        routing_key='file_indexed_events',
-        body=json.dumps(message_body).encode('utf-8')
+    # Get the exchange first
+    exchange = await rabbitmq_client.get_exchange("clowder")
+
+    # Use aio_pika publish method
+    await exchange.publish(
+        aio_pika.Message(
+            body=json.dumps(message_body).encode('utf-8'),
+            content_type="application/json",
+            delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
+        ),
+        routing_key="file_indexed_events",
     )
 
-    # TODO - timing issue here, check_feed_listeners needs to happen asynchronously.
-    time.sleep(1)
-
     # Submit file job to any qualifying feeds
     # await check_feed_listeners(
     #     es,
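Note that `get_exchange` only retrieves an exchange that already exists on the broker; it does not create one. A sketch of declaring the `clowder` exchange once at application startup so the lookup above succeeds; the exchange type and durability are assumptions, not specified by this commit:

```python
import aio_pika
from aio_pika.abc import AbstractChannel


async def declare_clowder_exchange(channel: AbstractChannel) -> None:
    # Idempotent as long as the parameters match the broker's existing exchange.
    await channel.declare_exchange(
        "clowder", aio_pika.ExchangeType.TOPIC, durable=True
    )
```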
@@ -175,7 +183,7 @@ async def add_local_file_entry(
     new_file: FileDB,
     user: UserOut,
     es: Elasticsearch,
-    rabbitmq_client: BlockingChannel,
+    rabbitmq_client: AbstractChannel,
     content_type: Optional[str] = None,
 ):
     """Insert FileDB object into MongoDB (makes Clowder ID). Bytes are not stored in DB and versioning not supported
@@ -188,22 +196,27 @@ async def add_local_file_entry(
     # Add entry to the file index
     await index_file(es, FileOut(**new_file.dict()))
     # Publish a message when indexing is complete
+
     message_body = {
         "event_type": "file_indexed",
         "file_data": json.loads(new_file.json()),
-        "user": json.loads(user.json()),  # This handles ObjectID serialization
+        "user": json.loads(user.json()),
         "timestamp": datetime.now().isoformat()
     }
 
-    rabbitmq_client.basic_publish(
-        exchange='clowder',
-        routing_key='file_indexed_events',
-        body=json.dumps(message_body).encode('utf-8')
+    # Get the exchange first
+    exchange = await rabbitmq_client.get_exchange("clowder")
+
+    # Use aio_pika publish method
+    await exchange.publish(
+        aio_pika.Message(
+            body=json.dumps(message_body).encode('utf-8'),
+            content_type="application/json",
+            delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
+        ),
+        routing_key="file_indexed_events",
    )
 
-    # TODO - timing issue here, check_feed_listeners needs to happen asynchronously.
-    time.sleep(1)
-
     # Submit file job to any qualifying feeds
     # await check_feed_listeners(
     #     es,
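On the receiving side, a consumer bound to the same routing key would pick up these `file_indexed` events. A minimal sketch, assuming a topic exchange; the queue name is hypothetical and not defined by this commit:

```python
import asyncio
import json

import aio_pika


async def consume_file_indexed(amqp_url: str) -> None:
    connection = await aio_pika.connect_robust(amqp_url)
    async with connection:
        channel = await connection.channel()
        exchange = await channel.declare_exchange(
            "clowder", aio_pika.ExchangeType.TOPIC, durable=True
        )
        # Queue name "file_indexed_consumer" is hypothetical.
        queue = await channel.declare_queue("file_indexed_consumer", durable=True)
        await queue.bind(exchange, routing_key="file_indexed_events")
        async with queue.iterator() as messages:
            async for message in messages:
                async with message.process():  # acks on successful exit
                    event = json.loads(message.body)
                    print(event["event_type"], event["timestamp"])


if __name__ == "__main__":
    asyncio.run(consume_file_indexed("amqp://guest:guest@localhost/"))
```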
@@ -255,7 +268,7 @@ async def update_file(
     file: UploadFile = File(...),
     es: Elasticsearch = Depends(dependencies.get_elasticsearchclient),
     credentials: HTTPAuthorizationCredentials = Security(security),
-    rabbitmq_client: BlockingChannel = Depends(dependencies.get_rabbitmq),
+    rabbitmq_client: AbstractChannel = Depends(dependencies.get_rabbitmq),
     allow: bool = Depends(FileAuthorization("uploader")),
 ):
     # Check all connection and abort if any one of them is not available
@@ -593,7 +606,7 @@ async def post_file_extract(
     parameters: dict = None,
     user=Depends(get_current_user),
     credentials: HTTPAuthorizationCredentials = Security(security),
-    rabbitmq_client: BlockingChannel = Depends(dependencies.get_rabbitmq),
+    rabbitmq_client: AbstractChannel = Depends(dependencies.get_rabbitmq),
     allow: bool = Depends(FileAuthorization("uploader")),
 ):
     if extractorName is None:
@@ -620,7 +633,7 @@ async def resubmit_file_extractions(
     file_id: str,
     user=Depends(get_current_user),
     credentials: HTTPAuthorizationCredentials = Security(security),
-    rabbitmq_client: BlockingChannel = Depends(dependencies.get_rabbitmq),
+    rabbitmq_client: AbstractChannel = Depends(dependencies.get_rabbitmq),
     allow: bool = Depends(FileAuthorization("editor")),
 ):
     """This route will check metadata. We get the extractors run from metadata from extractors.