@@ -159,7 +159,7 @@ static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
 static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						   struct io_wq_work *work)
 {
-	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
+	return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND));
 }
 
 static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
@@ -451,7 +451,7 @@ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
 
 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 {
-	return work->flags >> IO_WQ_HASH_SHIFT;
+	return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT;
 }
 
 static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
@@ -592,8 +592,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
 
 		next_hashed = wq_next_work(work);
 
-		if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
-			work->flags |= IO_WQ_WORK_CANCEL;
+		if (do_kill &&
+		    (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND))
+			atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		io_assign_current_work(worker, NULL);
 
@@ -891,7 +892,7 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
 {
 	do {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		work = wq->free_work(work);
 	} while (work);
@@ -926,7 +927,7 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 {
 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
-	unsigned long work_flags = work->flags;
+	unsigned int work_flags = atomic_read(&work->flags);
 	struct io_cb_cancel_data match = {
 		.fn		= io_wq_work_match_item,
 		.data		= work,
@@ -939,7 +940,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 	 * been marked as one that should not get executed, cancel it here.
 	 */
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
-	    (work->flags & IO_WQ_WORK_CANCEL)) {
+	    (work_flags & IO_WQ_WORK_CANCEL)) {
 		io_run_cancel(work, wq);
 		return;
 	}
@@ -982,15 +983,15 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	unsigned int bit;
 
 	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
-	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+	atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
 }
 
 static bool __io_wq_worker_cancel(struct io_worker *worker,
				   struct io_cb_cancel_data *match,
				   struct io_wq_work *work)
 {
	if (work && match->fn(work, match->data)) {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		__set_notify_signal(worker->task);
 		return true;
 	}
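The change throughout is the same pattern: work->flags goes from a plain integer to an atomic_t, so every read becomes atomic_read() and every `|=` becomes atomic_or(). A plain `work->flags |= ...` is a non-atomic read-modify-write, so concurrent updates (e.g. a canceling task and a worker both setting bits) can lose an update; snapshotting the flags once into work_flags in io_wq_enqueue() also avoids re-reading a value that may change underneath.

As a point of reference, here is a minimal userspace sketch of the same pattern using C11 <stdatomic.h> in place of the kernel's atomic_t helpers: atomic_load() stands in for atomic_read(), and atomic_fetch_or() for atomic_or() (note the kernel API takes the value first, the pointer second). The struct layout and flag values below are illustrative stand-ins, not the kernel's definitions.

    /* atomic-flags sketch: C11 analogue of the kernel pattern above */
    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative flag values, mirroring the spirit of io-wq.h */
    #define IO_WQ_WORK_CANCEL	(1u << 0)
    #define IO_WQ_WORK_HASHED	(1u << 1)
    #define IO_WQ_WORK_UNBOUND	(1u << 2)

    struct io_wq_work {
    	atomic_uint flags;	/* was: unsigned int flags; */
    };

    static void mark_cancel(struct io_wq_work *work)
    {
    	/* One atomic RMW, instead of a racy work->flags |= ... */
    	atomic_fetch_or(&work->flags, IO_WQ_WORK_CANCEL);
    }

    int main(void)
    {
    	struct io_wq_work work = { .flags = IO_WQ_WORK_UNBOUND };

    	/* Snapshot the flags once, then test the snapshot. */
    	unsigned int work_flags = atomic_load(&work.flags);

    	if (work_flags & IO_WQ_WORK_UNBOUND)
    		mark_cancel(&work);

    	printf("flags=0x%x\n", atomic_load(&work.flags));
    	return 0;
    }

Expected output is flags=0x5 (UNBOUND | CANCEL). The atomic version guarantees that two threads or-ing different bits concurrently both land, which the plain `|=` form does not.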