File tree Expand file tree Collapse file tree 4 files changed +29
-12
lines changed
Expand file tree Collapse file tree 4 files changed +29
-12
lines changed Original file line number Diff line number Diff line change @@ -31,15 +31,13 @@ fn wake(data: *const ()) {
3131 let task_ref = unsafe { TaskRef :: from_raw ( task_header_ptr) } ;
3232
3333 task_ref. schedule_safety ( ) ;
34-
35- drop ( task_ref) ; // wake uses move semantics, so we now own the data and must clean it up
3634}
3735
3836fn wake_by_ref ( data : * const ( ) ) {
3937 let task_header_ptr = data as * const TaskHeader ;
4038 let task_ref = unsafe { TaskRef :: from_raw ( task_header_ptr) } ;
4139
42- task_ref. schedule_safety ( ) ;
40+ task_ref. schedule_safety_by_ref ( ) ;
4341
4442 :: core:: mem:: forget ( task_ref) ; // don't touch the refcount of our data, since that is handled by drop_waker
4543}
Original file line number Diff line number Diff line change @@ -543,23 +543,41 @@ impl TaskRef {
543543 }
544544
545545 ///
546- /// Proxy to `AsyncTask<T>::schedule `
546+ /// Proxy to `AsyncTask<T>::schedule_by_ref `
547547 ///
548- pub ( crate ) fn schedule ( & self ) {
548+ pub ( crate ) fn schedule_by_ref ( & self ) {
549549 unsafe {
550550 ( self . header . as_ref ( ) . vtable . schedule ) ( self . header , self . clone ( ) ) ;
551551 }
552552 }
553553
554+ ///
555+ /// Proxy to `AsyncTask<T>::schedule`
556+ ///
557+ pub ( crate ) fn schedule ( self ) {
558+ unsafe {
559+ ( self . header . as_ref ( ) . vtable . schedule ) ( self . header , self ) ;
560+ }
561+ }
562+
554563 ///
555564 /// Proxy to `AsyncTask<T>::schedule_safety`
556565 ///
557- pub ( crate ) fn schedule_safety ( & self ) {
566+ pub ( crate ) fn schedule_safety_by_ref ( & self ) {
558567 unsafe {
559568 ( self . header . as_ref ( ) . vtable . schedule_safety ) ( self . header , self . clone ( ) ) ;
560569 }
561570 }
562571
572+ ///
573+ /// Proxy to `AsyncTask<T>::schedule_safety`
574+ ///
575+ pub ( crate ) fn schedule_safety ( self ) {
576+ unsafe {
577+ ( self . header . as_ref ( ) . vtable . schedule_safety ) ( self . header , self ) ;
578+ }
579+ }
580+
563581 ///
564582 /// Proxy to `AsyncTask<T>::poll`
565583 ///
Original file line number Diff line number Diff line change @@ -33,15 +33,13 @@ fn wake(data: *const ()) {
3333 let task_ref = unsafe { TaskRef :: from_raw ( task_header_ptr) } ;
3434
3535 task_ref. schedule ( ) ;
36-
37- drop ( task_ref) ; // wake uses move semantics, so we now own the data and must clean it up
3836}
3937
4038fn wake_by_ref ( data : * const ( ) ) {
4139 let task_header_ptr = data as * const TaskHeader ;
4240 let task_ref = unsafe { TaskRef :: from_raw ( task_header_ptr) } ;
4341
44- task_ref. schedule ( ) ;
42+ task_ref. schedule_by_ref ( ) ;
4543
4644 :: core:: mem:: forget ( task_ref) ; // don't touch the refcount of our data, since that is handled by drop_waker
4745}
Original file line number Diff line number Diff line change @@ -256,13 +256,16 @@ impl Inner {
256256 fn process_expired ( & mut self , info : & ExpireInfo ) {
257257 let iter = self . levels [ info. level as usize ] . aquire_slot ( info) ;
258258
259- for e in iter {
260- let data = unsafe { e. as_ref ( ) } ;
259+ for mut e in iter {
260+ let data = unsafe { e. as_mut ( ) } ;
261261
262262 //TODO: We could keep the waker list on the side so the wakers are fired outside of the lock. This is the next-step improvement once all of this is connected to the workers
263263 if data. data . expire_at <= info. deadline {
264264 // Wake up the task
265- data. data . waker . wake_by_ref ( ) ;
265+
266+ // Instead of going through the waker and waking by ref, which is less efficient, we replace it with a noop waker and wake by value
267+ let waker = core:: mem:: replace ( & mut data. data . waker , Waker :: noop ( ) . clone ( ) ) ;
268+ waker. wake ( ) ;
266269
267270 unsafe {
268271 :: core:: ptr:: drop_in_place ( e. as_ptr ( ) ) ;
You can’t perform that action at this time.
0 commit comments