@@ -1304,16 +1304,16 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
-	struct llist_node *first;
+	struct llist_node *head;
 
 	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
 		flags &= ~IOU_F_TWQ_LAZY_WAKE;
 
-	first = READ_ONCE(ctx->work_llist.first);
+	head = READ_ONCE(ctx->work_llist.first);
 	do {
 		nr_tw_prev = 0;
-		if (first) {
-			struct io_kiocb *first_req = container_of(first,
+		if (head) {
+			struct io_kiocb *first_req = container_of(head,
 							struct io_kiocb,
 							io_task_work.node);
 			/*
@@ -1328,8 +1328,8 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 		nr_tw = INT_MAX;
 
 		req->nr_tw = nr_tw;
-		req->io_task_work.node.next = first;
-	} while (!try_cmpxchg(&ctx->work_llist.first, &first,
+		req->io_task_work.node.next = head;
+	} while (!try_cmpxchg(&ctx->work_llist.first, &head,
			      &req->io_task_work.node));
 
 	/*
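Aside: the loop touched by these two hunks is the classic lock-free list push — read the current head, point the new node at it, and retry the compare-and-swap until no other producer raced in. Below is a minimal standalone userspace sketch of the same pattern using C11 atomics instead of the kernel's llist/try_cmpxchg helpers; all names (node, list_head, push) are illustrative, not io_uring API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int value;
};

static _Atomic(struct node *) list_head;

/* Returns true if the list was empty before this push (cf. "!head"). */
static bool push(struct node *n)
{
	struct node *head = atomic_load_explicit(&list_head, memory_order_relaxed);

	do {
		/* Link the new node in front of the head we last observed. */
		n->next = head;
		/* On failure, 'head' is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak(&list_head, &head, n));

	return head == NULL;
}

int main(void)
{
	struct node a = { .next = NULL, .value = 1 };
	struct node b = { .next = NULL, .value = 2 };

	if (push(&a))
		puts("first push: list was empty, wake-up path would run");
	if (!push(&b))
		puts("second push: list already had work queued");
	return 0;
}

As in io_req_local_work_add(), the pre-CAS head value is kept around after the loop: head == NULL means this push took the list from empty to non-empty, which is exactly the !head case the next hunk uses to decide whether to set IORING_SQ_TASKRUN or signal the eventfd.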
@@ -1340,7 +1340,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 	 * is similar to the wait/wake task state sync.
 	 */
 
-	if (!first) {
+	if (!head) {
 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 		if (ctx->has_evfd)
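The barrier comment in this last hunk describes the usual wait/wake handshake: the producer publishes work and then checks whether a waiter is asleep, while the waiter marks itself sleeping and then re-checks for work, so the full barrier implied by cmpxchg guarantees at least one side observes the other. A hedged standalone illustration of that handshake with sequentially consistent C11 atomics and pthreads follows; the names (work_posted, waiter_sleeping) are made up for the demo and are not kernel or io_uring interfaces.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool work_posted;
static _Atomic bool waiter_sleeping;

static void *producer(void *arg)
{
	(void)arg;
	/* Publish work; the seq_cst store stands in for the cmpxchg barrier. */
	atomic_store(&work_posted, true);
	/* Either we see the waiter sleeping here, or it will see the work. */
	if (atomic_load(&waiter_sleeping))
		puts("producer: waiter marked itself asleep, would wake it");
	return NULL;
}

static void *waiter(void *arg)
{
	(void)arg;
	/* Declare intent to sleep before the final check, like set_current_state(). */
	atomic_store(&waiter_sleeping, true);
	if (!atomic_load(&work_posted))
		puts("waiter: no work yet, safe to sleep (producer will wake us)");
	else
		puts("waiter: work already posted, skip sleeping");
	return NULL;
}

int main(void)
{
	pthread_t p, w;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&w, NULL, waiter, NULL);
	pthread_join(p, NULL);
	pthread_join(w, NULL);
	return 0;
}

With seq_cst ordering, the lost-wakeup outcome (producer misses waiter_sleeping while the waiter also misses work_posted) cannot occur, which is the property the kernel comment is relying on.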