@@ -10,13 +10,17 @@ use std::sync::atomic::Ordering;
use futures::future::BoxFuture;
use vortex_error::vortex_panic;

+use crate::runtime::blocking::BlockingRuntime;
use crate::runtime::AbortHandleRef;
+use crate::runtime::current::CurrentThreadRuntime;
+use crate::runtime::current::CurrentThreadWorkerPool;
use crate::runtime::Executor;
use crate::runtime::Handle;
use crate::runtime::IoTask;
use crate::runtime::LocalExecutor;
use crate::runtime::LocalSpawn;

+#[allow(dead_code)]
/// An executor that dispatches work across a fixed set of underlying executors.
///
/// Tasks are assigned round-robin; there is no work stealing. This is intended to pair with
@@ -26,6 +30,7 @@ pub(crate) struct HandleSetExecutor {
    picker: AtomicUsize,
}

+#[allow(dead_code)]
impl HandleSetExecutor {
    pub(crate) fn new(executors: Vec<Arc<dyn Executor>>) -> Self {
        assert!(!executors.is_empty());
@@ -42,6 +47,7 @@ impl HandleSetExecutor {
    }
}

+#[allow(dead_code)]
/// A thin wrapper around a set of executors that produces a dispatching [`Handle`].
///
/// This is intended to be backed by per-core runtimes (e.g., io_uring reactors), but it can be
@@ -51,6 +57,7 @@ pub(crate) struct HandleSet {
    dispatcher: Arc<HandleSetExecutor>,
}

+#[allow(dead_code)]
impl HandleSet {
    pub(crate) fn new(executors: Vec<Arc<dyn Executor>>) -> Self {
        let executors: Arc<[Arc<dyn Executor>]> = executors.into();
@@ -75,6 +82,71 @@ impl HandleSet {
    }
}

+/// Create a [`Handle`] that dispatches work round-robin across the provided handles.
+///
+/// This is useful for thread-per-core runtimes where each handle is tied to a single reactor.
+pub fn dispatching_handle(handles: &[Handle]) -> Handle {
+    let executors = handles
+        .iter()
+        .map(|h| h.runtime())
+        .collect::<Vec<_>>();
+    let set = HandleSet::new(executors);
+    set.dispatching_handle()
+}
+
+/// A lightweight per-core pool using current-thread runtimes and background workers.
+///
+/// This is a stopgap until a true io_uring-backed runtime is wired in. Each core owns its own
+/// executor driven by a single worker thread, and the exposed handle dispatches round-robin
+/// across them.
+#[allow(dead_code)]
+pub struct PerCoreRuntimePool {
+    cores: Vec<CurrentThreadCore>,
+    handle: Handle,
+}
+
+#[allow(dead_code)]
+impl PerCoreRuntimePool {
+    /// Build a pool with `cores` runtimes (defaults to available_parallelism if None).
+    pub fn new(cores: Option<usize>) -> Self {
+        let core_count = cores
+            .or_else(|| std::thread::available_parallelism().ok().map(|n| n.get()))
+            .unwrap_or(1);
+
+        let cores: Vec<_> = (0..core_count).map(|_| CurrentThreadCore::new()).collect();
+        let handles: Vec<_> = cores.iter().map(|c| c.handle()).collect();
+        let handle = dispatching_handle(&handles);
+
+        Self { cores, handle }
+    }
+
+    /// A handle that spreads work across the per-core runtimes.
+    pub fn handle(&self) -> Handle {
+        self.handle.clone()
+    }
+}
+
+struct CurrentThreadCore {
+    runtime: CurrentThreadRuntime,
+    _pool: CurrentThreadWorkerPool,
+}
+
+impl CurrentThreadCore {
+    fn new() -> Self {
+        let runtime = CurrentThreadRuntime::new();
+        let pool = runtime.new_pool();
+        pool.set_workers(1);
+        Self {
+            runtime,
+            _pool: pool,
+        }
+    }
+
+    fn handle(&self) -> Handle {
+        self.runtime.handle()
+    }
+}
+
impl Executor for HandleSetExecutor {
    fn spawn(&self, fut: BoxFuture<'static, ()>) -> AbortHandleRef {
        self.pick().spawn(fut)
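
As a rough usage sketch of the new `dispatching_handle` helper (not part of this commit): it assumes `handle_a` and `handle_b` are `Handle`s for two existing per-core runtimes, and that `Handle` exposes a `spawn`-style method for `'static` futures, which this diff does not show.

```rust
// Hypothetical handles tied to two per-core reactors.
let handles = [handle_a, handle_b];

// One handle that assigns each spawned task to the next executor in turn;
// there is no work stealing, so a task stays on the reactor it was given to.
let dispatch = dispatching_handle(&handles);

// Assumed spawn-style API on `Handle` (not shown in this diff).
dispatch.spawn(async {
    // per-core work here
});
```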
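
Similarly, a minimal sketch of `PerCoreRuntimePool`; only `new` and `handle` are taken from the code above, and the surrounding module path is assumed.

```rust
// One current-thread runtime per core; `None` defaults to available_parallelism,
// falling back to a single core when that cannot be determined.
let pool = PerCoreRuntimePool::new(None);

// A clone of the round-robin dispatching handle; the per-core runtimes
// themselves remain owned by `pool`.
let handle = pool.handle();
```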