@@ -6,6 +6,7 @@ use std::time::Duration;
 
 use anyhow::Result;
 use bitflags::bitflags;
+use cainome::cairo_serde::CairoSerde;
 use dojo_utils::provider as provider_utils;
 use dojo_world::contracts::world::WorldContractReader;
 use futures_util::future::{join_all, try_join_all};
@@ -17,7 +18,7 @@ use starknet::core::types::{
 };
 use starknet::core::utils::get_selector_from_name;
 use starknet::providers::Provider;
-use starknet_crypto::Felt;
+use starknet_crypto::{poseidon_hash_many, Felt};
 use tokio::sync::broadcast::Sender;
 use tokio::sync::mpsc::Sender as BoundedSender;
 use tokio::sync::Semaphore;
@@ -207,6 +208,9 @@ pub struct ParallelizedEvent {
     pub event: Event,
 }
 
+type TaskPriority = usize;
+type TaskId = u64;
+
 #[allow(missing_debug_implementations)]
 pub struct Engine<P: Provider + Send + Sync + std::fmt::Debug + 'static> {
     world: Arc<WorldContractReader<P>>,
@@ -216,7 +220,7 @@ pub struct Engine<P: Provider + Send + Sync + std::fmt::Debug + 'static> {
     config: EngineConfig,
     shutdown_tx: Sender<()>,
     block_tx: Option<BoundedSender<u64>>,
-    tasks: HashMap<u64, Vec<(ContractType, ParallelizedEvent)>>,
+    tasks: BTreeMap<TaskPriority, HashMap<TaskId, Vec<(ContractType, ParallelizedEvent)>>>,
     contracts: Arc<HashMap<Felt, ContractType>>,
 }
 
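The reshaped `tasks` field groups queued events first by priority, then by task id. A minimal standalone sketch of how this nesting behaves (the event payload is simplified to a `&str`; `TaskPriority` and `TaskId` are the aliases introduced above):

```rust
use std::collections::{BTreeMap, HashMap};

type TaskPriority = usize;
type TaskId = u64;

fn main() {
    // Stand-in for Vec<(ContractType, ParallelizedEvent)>: just event names.
    let mut tasks: BTreeMap<TaskPriority, HashMap<TaskId, Vec<&str>>> = BTreeMap::new();

    // Store operations are queued at priority 2, registrations at priority 0.
    tasks.entry(2).or_default().entry(42).or_default().push("StoreSetRecord");
    tasks.entry(0).or_default().entry(7).or_default().push("ModelRegistered");

    // A BTreeMap iterates keys in ascending order, so priority 0 is drained
    // before priority 2; tasks inside one level carry no ordering guarantee.
    for (priority, group) in std::mem::take(&mut tasks) {
        for (task_id, events) in group {
            println!("priority {priority}, task {task_id}: {events:?}");
        }
    }
}
```

Because `BTreeMap` yields keys in ascending order, model and event registrations (priority 0) are always handled before the store updates and event messages (priority 2) that depend on them.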
@@ -250,7 +254,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
             shutdown_tx,
             block_tx,
             contracts,
-            tasks: HashMap::new(),
+            tasks: BTreeMap::new(),
         }
     }
 
@@ -596,44 +600,72 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
     }
 
     async fn process_tasks(&mut self) -> Result<()> {
-        // We use a semaphore to limit the number of concurrent tasks
         let semaphore = Arc::new(Semaphore::new(self.config.max_concurrent_tasks));
 
-        // Run all tasks concurrently
-        let mut handles = Vec::new();
-        for (task_id, events) in self.tasks.drain() {
-            let db = self.db.clone();
-            let world = self.world.clone();
-            let semaphore = semaphore.clone();
-            let processors = self.processors.clone();
-
-            let event_processor_config = self.config.event_processor_config.clone();
-            handles.push(tokio::spawn(async move {
-                let _permit = semaphore.acquire().await?;
-                let mut local_db = db.clone();
-                for (contract_type, ParallelizedEvent { event_id, event, block_number, block_timestamp }) in events {
-                    let contract_processors = processors.get_event_processor(contract_type);
-                    if let Some(processors) = contract_processors.get(&event.keys[0]) {
-
-                        let processor = processors.iter().find(|p| p.validate(&event)).expect("Must find atleast one processor for the event");
-
-                        debug!(target: LOG_TARGET, event_name = processor.event_key(), task_id = %task_id, "Processing parallelized event.");
-
-                        if let Err(e) = processor
-                            .process(&world, &mut local_db, block_number, block_timestamp, &event_id, &event, &event_processor_config)
-                            .await
-                        {
-                            error!(target: LOG_TARGET, event_name = processor.event_key(), error = %e, task_id = %task_id, "Processing parallelized event.");
+        // Process each priority level sequentially
+        for (priority, task_group) in std::mem::take(&mut self.tasks) {
+            let mut handles = Vec::new();
+
+            // Process all tasks within this priority level concurrently
+            for (task_id, events) in task_group {
+                let db = self.db.clone();
+                let world = self.world.clone();
+                let semaphore = semaphore.clone();
+                let processors = self.processors.clone();
+                let event_processor_config = self.config.event_processor_config.clone();
+
+                handles.push(tokio::spawn(async move {
+                    let _permit = semaphore.acquire().await?;
+                    let mut local_db = db.clone();
+
+                    // Process all events for this task sequentially
+                    for (contract_type, event) in events {
+                        let contract_processors = processors.get_event_processor(contract_type);
+                        if let Some(processors) = contract_processors.get(&event.event.keys[0]) {
+                            let processor = processors
+                                .iter()
+                                .find(|p| p.validate(&event.event))
+                                .expect("Must find at least one processor for the event");
+
+                            debug!(
+                                target: LOG_TARGET,
+                                event_name = processor.event_key(),
+                                task_id = %task_id,
+                                priority = %priority,
+                                "Processing parallelized event."
+                            );
+
+                            if let Err(e) = processor
+                                .process(
+                                    &world,
+                                    &mut local_db,
+                                    event.block_number,
+                                    event.block_timestamp,
+                                    &event.event_id,
+                                    &event.event,
+                                    &event_processor_config,
+                                )
+                                .await
+                            {
+                                error!(
+                                    target: LOG_TARGET,
+                                    event_name = processor.event_key(),
+                                    error = %e,
+                                    task_id = %task_id,
+                                    priority = %priority,
+                                    "Processing parallelized event."
+                                );
+                            }
                         }
                     }
-                }
 
-                Ok::<_, anyhow::Error>(())
-            }));
-        }
+                    Ok::<_, anyhow::Error>(())
+                }));
+            }
 
-        // Join all tasks
-        try_join_all(handles).await?;
+            // Wait for all tasks in this priority level to complete before moving to next priority
+            try_join_all(handles).await?;
+        }
 
         Ok(())
     }
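The control flow above is: drain the map priority by priority, spawn one Tokio task per task id within the level, cap the in-flight tasks with a shared semaphore, and only move to the next priority once `try_join_all` has returned. A compact, self-contained sketch of that same pattern (hard-coded priorities and task ids, not the engine's real types):

```rust
use std::sync::Arc;

use futures_util::future::try_join_all;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Assumed stand-in for config.max_concurrent_tasks.
    let semaphore = Arc::new(Semaphore::new(2));

    // Priority levels are handled one after the other; the second batch
    // only starts once every handle of the first has been joined.
    for (priority, task_ids) in [(0usize, vec![1u64, 2, 3]), (2, vec![4, 5])] {
        let mut handles = Vec::new();
        for task_id in task_ids {
            let semaphore = semaphore.clone();
            handles.push(tokio::spawn(async move {
                // At most two tasks of this level run concurrently.
                let _permit = semaphore.acquire().await?;
                println!("priority {priority}, task {task_id}");
                Ok::<_, anyhow::Error>(())
            }));
        }
        // Joins every task of this level; a JoinError propagates via `?`
        // (the inner per-task Results are collected but not checked here).
        try_join_all(handles).await?;
    }
    Ok(())
}
```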
@@ -802,14 +834,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
         contract_type: ContractType,
     ) -> Result<()> {
         if self.config.flags.contains(IndexingFlags::RAW_EVENTS) {
-            match contract_type {
-                ContractType::WORLD => {
-                    self.db.store_event(event_id, event, transaction_hash, block_timestamp)?;
-                }
-                // ERC events needs to be processed inside there respective processor
-                // we store transfer events for ERC contracts regardless of this flag
-                ContractType::ERC20 | ContractType::ERC721 => {}
-            }
+            self.db.store_event(event_id, event, transaction_hash, block_timestamp)?;
         }
 
         let event_key = event.keys[0];
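With the `match` on `contract_type` removed, the `RAW_EVENTS` flag now gates raw-event storage for every contract type. A small illustrative sketch of that kind of `bitflags` guard (the flag values and the `TRANSACTIONS` variant here are assumptions, not the actual torii definition):

```rust
use bitflags::bitflags;

bitflags! {
    // Illustrative definition; the real IndexingFlags lives in the engine config.
    struct IndexingFlags: u32 {
        const TRANSACTIONS = 0b01;
        const RAW_EVENTS = 0b10;
    }
}

fn main() {
    let flags = IndexingFlags::RAW_EVENTS;
    if flags.contains(IndexingFlags::RAW_EVENTS) {
        // With the per-contract-type match gone, this branch now stores raw
        // events for WORLD, ERC20 and ERC721 contracts alike.
        println!("storing raw event");
    }
}
```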
@@ -856,30 +881,53 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
             .find(|p| p.validate(event))
             .expect("Must find atleast one processor for the event");
 
-        let task_identifier = match processor.event_key().as_str() {
+        let (task_priority, task_identifier) = match processor.event_key().as_str() {
+            "ModelRegistered" | "EventRegistered" => {
+                let mut hasher = DefaultHasher::new();
+                event.keys.iter().for_each(|k| k.hash(&mut hasher));
+                let hash = hasher.finish();
+                (0usize, hash) // Priority 0 (highest) for model/event registration
+            }
             "StoreSetRecord" | "StoreUpdateRecord" | "StoreUpdateMember" | "StoreDelRecord" => {
                 let mut hasher = DefaultHasher::new();
-                // model selector
                 event.keys[1].hash(&mut hasher);
-                // entity id
                 event.keys[2].hash(&mut hasher);
-                hasher.finish()
+                let hash = hasher.finish();
+                (2usize, hash) // Priority 2 (lower) for store operations
+            }
+            "EventEmitted" => {
+                let mut hasher = DefaultHasher::new();
+
+                let keys = Vec::<Felt>::cairo_deserialize(&event.data, 0).unwrap_or_else(|e| {
+                    panic!("Expected EventEmitted keys to be well formed: {:?}", e);
+                });
+
+                // selector
+                event.keys[1].hash(&mut hasher);
+                // entity id
+                let entity_id = poseidon_hash_many(&keys);
+                entity_id.hash(&mut hasher);
+
+                let hash = hasher.finish();
+                (2usize, hash) // Priority 2 for event messages
+            }
-            _ => 0,
+            _ => (0, 0), // No parallelization for other events
         };
 
-        // if we have a task identifier, we queue the event to be parallelized
         if task_identifier != 0 {
-            self.tasks.entry(task_identifier).or_default().push((
-                contract_type,
-                ParallelizedEvent {
-                    event_id: event_id.to_string(),
-                    event: event.clone(),
-                    block_number,
-                    block_timestamp,
-                },
-            ));
+            self.tasks.entry(task_priority).or_default().entry(task_identifier).or_default().push(
+                (
+                    contract_type,
+                    ParallelizedEvent {
+                        event_id: event_id.to_string(),
+                        event: event.clone(),
+                        block_number,
+                        block_timestamp,
+                    },
+                ),
+            );
         } else {
+            // Process non-parallelized events immediately
             // if we dont have a task identifier, we process the event immediately
             if processor.validate(event) {
                 if let Err(e) = processor
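For reference, the task identifier in the last hunk is a `DefaultHasher` digest over the fields whose events must stay ordered relative to each other (model selector plus entity id, with `EventEmitted` recomputing the entity id via `poseidon_hash_many` over the Cairo-deserialized keys). A standalone sketch of the grouping idea, with the felts simplified to `u64` and the Poseidon/Cairo-serde steps omitted:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical helper mirroring the StoreSetRecord / StoreUpdateRecord branch:
// the task id is a digest of the model selector and the entity id.
fn task_id(model_selector: u64, entity_id: u64) -> u64 {
    let mut hasher = DefaultHasher::new();
    model_selector.hash(&mut hasher);
    entity_id.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // Events touching the same (model, entity) pair share a task id, so they
    // stay in one Vec and replay sequentially; unrelated entities hash to
    // different tasks and can be processed in parallel.
    assert_eq!(task_id(1, 99), task_id(1, 99));
    assert_ne!(task_id(1, 99), task_id(1, 100));
}
```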