Skip to content

Commit 94a4274

Browse files
calebsander
authored and axboe committed
io_uring: pass struct io_tw_state by value
8e5b3b8 ("io_uring: remove struct io_tw_state::locked") removed the only field of io_tw_state but kept it as a task work callback argument to "forc[e] users not to invoke them carelessly out of a wrong context". Passing the struct io_tw_state * argument adds a few instructions to all callers that can't inline the functions and see the argument is unused. So pass struct io_tw_state by value instead. Since it's a 0-sized value, it can be passed without any instructions needed to initialize it. Signed-off-by: Caleb Sander Mateos <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent bcf8a02 commit 94a4274

File tree

2 files changed

+8
-8
lines changed

2 files changed

+8
-8
lines changed

include/linux/io_uring_types.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -444,7 +444,7 @@ struct io_ring_ctx {
444444
struct io_tw_state {
445445
};
446446
/* Alias to use in code that doesn't instantiate struct io_tw_state */
447-
typedef struct io_tw_state *io_tw_token_t;
447+
typedef struct io_tw_state io_tw_token_t;
448448

449449
enum {
450450
REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,

io_uring/io_uring.c

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -255,7 +255,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
255255
percpu_ref_get(&ctx->refs);
256256
mutex_lock(&ctx->uring_lock);
257257
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
258-
req->io_task_work.func(req, &ts);
258+
req->io_task_work.func(req, ts);
259259
io_submit_flush_completions(ctx);
260260
mutex_unlock(&ctx->uring_lock);
261261
percpu_ref_put(&ctx->refs);
@@ -1052,24 +1052,24 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
10521052
io_task_work.node);
10531053

10541054
if (req->ctx != ctx) {
1055-
ctx_flush_and_put(ctx, &ts);
1055+
ctx_flush_and_put(ctx, ts);
10561056
ctx = req->ctx;
10571057
mutex_lock(&ctx->uring_lock);
10581058
percpu_ref_get(&ctx->refs);
10591059
}
10601060
INDIRECT_CALL_2(req->io_task_work.func,
10611061
io_poll_task_func, io_req_rw_complete,
1062-
req, &ts);
1062+
req, ts);
10631063
node = next;
10641064
(*count)++;
10651065
if (unlikely(need_resched())) {
1066-
ctx_flush_and_put(ctx, &ts);
1066+
ctx_flush_and_put(ctx, ts);
10671067
ctx = NULL;
10681068
cond_resched();
10691069
}
10701070
} while (node && *count < max_entries);
10711071

1072-
ctx_flush_and_put(ctx, &ts);
1072+
ctx_flush_and_put(ctx, ts);
10731073
return node;
10741074
}
10751075

@@ -1341,7 +1341,7 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
13411341

13421342
if (!io_local_work_pending(ctx))
13431343
return 0;
1344-
return __io_run_local_work(ctx, &ts, min_events,
1344+
return __io_run_local_work(ctx, ts, min_events,
13451345
max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
13461346
}
13471347

@@ -1352,7 +1352,7 @@ static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
13521352
int ret;
13531353

13541354
mutex_lock(&ctx->uring_lock);
1355-
ret = __io_run_local_work(ctx, &ts, min_events, max_events);
1355+
ret = __io_run_local_work(ctx, ts, min_events, max_events);
13561356
mutex_unlock(&ctx->uring_lock);
13571357
return ret;
13581358
}

0 commit comments

Comments
 (0)