@@ -34,7 +34,7 @@ static bool irq_work_claim(struct irq_work *work)
 	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
 	/*
 	 * If the work is already pending, no need to raise the IPI.
-	 * The pairing atomic_xchg() in irq_work_run() makes sure
+	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
 	 * everything we did before is visible.
 	 */
 	if (oflags & IRQ_WORK_PENDING)
@@ -135,7 +135,6 @@ static void irq_work_run_list(struct llist_head *list)
 {
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
-	int flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -144,15 +143,15 @@ static void irq_work_run_list(struct llist_head *list)
 
 	llnode = llist_del_all(list);
 	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
+		int flags;
 		/*
 		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
 		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
-		atomic_xchg(&work->flags, flags);
+		flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
 
 		work->func(work);
 		/*
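For illustration, here is a minimal user-space sketch of the claim/run pairing this diff touches. It assumes C11 atomics as stand-ins for the kernel API (note C11's atomic_fetch_or takes the object first, and atomic_fetch_and(&v, ~m) stands in for the kernel's atomic_fetch_andnot(m, &v)); the flag values are illustrative, not the kernel's definitions.

/*
 * Sketch only: C11 stand-ins for the kernel atomics, illustrative flag
 * values.  Shows why collapsing the read + xchg into one fetch-andnot
 * is the simpler form: one RMW clears PENDING and returns the prior
 * flags, with no window between a separate load and store.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_PENDING (1 << 0)
#define IRQ_WORK_BUSY    (1 << 1)
#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)

static atomic_int flags;

/* Claim side: one atomic RMW sets both bits and reports the old state. */
static bool claim(void)
{
	int oflags = atomic_fetch_or(&flags, IRQ_WORK_CLAIMED);

	/* Already pending: the current runner will handle it, no IPI. */
	return !(oflags & IRQ_WORK_PENDING);
}

/* Run side: clear PENDING in a single atomic RMW. */
static int run(void)
{
	/*
	 * Old scheme, two steps:
	 *   int f = atomic_load(&flags) & ~IRQ_WORK_PENDING;
	 *   atomic_exchange(&flags, f);
	 * The exchange stores a value computed from an earlier load; the
	 * single fetch-and below does the same clearing in one ordered
	 * RMW and hands back the previous flags directly.
	 */
	return atomic_fetch_and(&flags, ~IRQ_WORK_PENDING);
}

int main(void)
{
	printf("first claim raises IPI: %d\n", claim());
	printf("second claim does not:  %d\n", claim());
	printf("run saw flags %#x, left %#x\n", run(), atomic_load(&flags));
	return 0;
}

C11 atomics default to sequentially consistent ordering, so the fetch_and here also plays the role the comment in irq_work_claim() describes: it pairs with the claimer's fetch_or to make the runner's prior writes visible.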