
Commit 2b1424c

dhowells authored and brauner committed
netfs: Fix wait/wake to be consistent about the waitqueue used
Fix further inconsistencies in the use of waitqueues (clear_and_wake_up_bit() vs private waitqueue). Move some of this stuff from the read and write sides into common code so that it can be done in fewer places.

To make this work, async I/O needs to set NETFS_RREQ_OFFLOAD_COLLECTION to indicate that a workqueue will do the collecting and places that call the wait function need to deal with it returning the amount transferred.

Fixes: e2d46f2 ("netfs: Change the read result collector to only use one work item")
Signed-off-by: David Howells <[email protected]>
Link: https://lore.kernel.org/[email protected]
cc: Marc Dionne <[email protected]>
cc: Steve French <[email protected]>
cc: Ihor Solodrai <[email protected]>
cc: Eric Van Hensbergen <[email protected]>
cc: Latchesar Ionkov <[email protected]>
cc: Dominique Martinet <[email protected]>
cc: Christian Schoenebeck <[email protected]>
cc: Paulo Alcantara <[email protected]>
cc: Jeff Layton <[email protected]>
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
Signed-off-by: Christian Brauner <[email protected]>
1 parent: 20d72b0

11 files changed: +284 −224 lines changed
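Both points in the commit message are visible in the fs/netfs/direct_write.c hunk below. As a minimal caller-side sketch of the new pattern, abridged from that hunk (nothing here beyond what the diff shows; the intervening code is elided):

	/* Abridged from netfs_unbuffered_write_iter_locked() below. */
	if (async)	/* a workqueue, not the app thread, runs the collector */
		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);

	if (!async) {
		ret = netfs_wait_for_write(wreq);	/* error code or bytes transferred */
		if (ret > 0)
			iocb->ki_pos += ret;
	} else {
		ret = -EIOCBQUEUED;	/* collection finishes on the workqueue */
	}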

fs/netfs/buffered_read.c

Lines changed: 1 addition & 1 deletion

@@ -312,7 +312,7 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 	if (unlikely(size > 0)) {
 		smp_wmb(); /* Write lists before ALL_QUEUED. */
 		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
-		netfs_wake_read_collector(rreq);
+		netfs_wake_collector(rreq);
 	}
 
 	/* Defer error return as we may need to wait for outstanding I/O. */

fs/netfs/buffered_write.c

Lines changed: 1 addition & 1 deletion

@@ -386,7 +386,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
-		if (ret == 0)
+		if (ret == 0 && ret2 < 0)
 			ret = ret2;
 	}

fs/netfs/direct_read.c

Lines changed: 2 additions & 2 deletions

@@ -103,7 +103,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
 		rreq->netfs_ops->issue_read(subreq);
 
 		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
-			netfs_wait_for_pause(rreq);
+			netfs_wait_for_paused_read(rreq);
 		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
 			break;
 		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
@@ -115,7 +115,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
 	if (unlikely(size > 0)) {
 		smp_wmb(); /* Write lists before ALL_QUEUED. */
 		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
-		netfs_wake_read_collector(rreq);
+		netfs_wake_collector(rreq);
 	}
 
 	return ret;

fs/netfs/direct_write.c

Lines changed: 4 additions & 6 deletions

@@ -87,6 +87,8 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	}
 
 	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+	if (async)
+		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
@@ -105,13 +107,9 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 
 	if (!async) {
 		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
-		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
-			    TASK_UNINTERRUPTIBLE);
-		ret = wreq->error;
-		if (ret == 0) {
-			ret = wreq->transferred;
+		ret = netfs_wait_for_write(wreq);
+		if (ret > 0)
 			iocb->ki_pos += ret;
-		}
 	} else {
 		ret = -EIOCBQUEUED;
 	}

fs/netfs/internal.h

Lines changed: 27 additions & 6 deletions

@@ -62,6 +62,14 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
 struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
 					    enum netfs_folioq_trace trace);
 void netfs_reset_iter(struct netfs_io_subrequest *subreq);
+void netfs_wake_collector(struct netfs_io_request *rreq);
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+				       struct netfs_io_stream *stream);
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq);
 
 /*
  * objects.c
@@ -91,11 +99,9 @@ static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
 /*
  * read_collect.c
  */
+bool netfs_read_collection(struct netfs_io_request *rreq);
 void netfs_read_collection_worker(struct work_struct *work);
-void netfs_wake_read_collector(struct netfs_io_request *rreq);
 void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
-ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
-void netfs_wait_for_pause(struct netfs_io_request *rreq);
 
 /*
  * read_pgpriv2.c
@@ -175,8 +181,8 @@ static inline void netfs_stat_d(atomic_t *stat)
  * write_collect.c
  */
 int netfs_folio_written_back(struct folio *folio);
+bool netfs_write_collection(struct netfs_io_request *wreq);
 void netfs_write_collection_worker(struct work_struct *work);
-void netfs_wake_write_collector(struct netfs_io_request *wreq);
 
 /*
  * write_issue.c
@@ -197,8 +203,8 @@ struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len
 int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
 			       struct folio *folio, size_t copied, bool to_page_end,
 			       struct folio **writethrough_cache);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			   struct folio *writethrough_cache);
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
 /*
@@ -253,6 +259,21 @@ static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
 		netfs_group->free(netfs_group);
 }
 
+/*
+ * Clear and wake up a NETFS_RREQ_* flag bit on a request.
+ */
+static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
+					unsigned int rreq_flag,
+					enum netfs_rreq_trace trace)
+{
+	if (test_bit(rreq_flag, &rreq->flags)) {
+		trace_netfs_rreq(rreq, trace);
+		clear_bit_unlock(rreq_flag, &rreq->flags);
+		smp_mb__after_atomic(); /* Set flag before task state */
+		wake_up(&rreq->waitq);
+	}
+}
+
 /*
  * fscache-cache.c
  */
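
The new netfs_wake_rreq_flag() helper gives every site one consistent clear-then-wake sequence on rreq->waitq. As a hedged example of how a collection path might use it to lift a pause (the unpause call sites are in hunks not shown in this excerpt, so the exact trace label is an assumption):

	/* Sketch: clear PAUSE and wake tasks in netfs_wait_for_paused_read/write(). */
	netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);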

fs/netfs/misc.c

Lines changed: 218 additions & 0 deletions

@@ -313,3 +313,221 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	return true;
 }
 EXPORT_SYMBOL(netfs_release_folio);
+
+/*
+ * Wake the collection work item.
+ */
+void netfs_wake_collector(struct netfs_io_request *rreq)
+{
+	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
+	    !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
+		queue_work(system_unbound_wq, &rreq->work);
+	} else {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
+		wake_up(&rreq->waitq);
+	}
+}
+
+/*
+ * Mark a subrequest as no longer being in progress and, if need be, wake the
+ * collector.
+ */
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq)
+{
+	struct netfs_io_request *rreq = subreq->rreq;
+	struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr];
+
+	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+	smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
+
+	/* If we are at the head of the queue, wake up the collector. */
+	if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
+	    test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
+		netfs_wake_collector(rreq);
+}
+
+/*
+ * Wait for all outstanding I/O in a stream to quiesce.
+ */
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+				       struct netfs_io_stream *stream)
+{
+	struct netfs_io_subrequest *subreq;
+	DEFINE_WAIT(myself);
+
+	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
+			continue;
+
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		for (;;) {
+			prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+			if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
+				break;
+
+			trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
+			schedule();
+			trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+		}
+	}
+
+	finish_wait(&rreq->waitq, &myself);
+}
+
+/*
+ * Perform collection in app thread if not offloaded to workqueue.
+ */
+static int netfs_collect_in_app(struct netfs_io_request *rreq,
+				bool (*collector)(struct netfs_io_request *rreq))
+{
+	bool need_collect = false, inactive = true;
+
+	for (int i = 0; i < NR_IO_STREAMS; i++) {
+		struct netfs_io_subrequest *subreq;
+		struct netfs_io_stream *stream = &rreq->io_streams[i];
+
+		if (!stream->active)
+			continue;
+		inactive = false;
+		trace_netfs_collect_stream(rreq, stream);
+		subreq = list_first_entry_or_null(&stream->subrequests,
+						  struct netfs_io_subrequest,
+						  rreq_link);
+		if (subreq &&
+		    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+		     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+			need_collect = true;
+			break;
+		}
+	}
+
+	if (!need_collect && !inactive)
+		return 0; /* Sleep */
+
+	__set_current_state(TASK_RUNNING);
+	if (collector(rreq)) {
+		/* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */
+		netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+		return 1; /* Done */
+	}
+
+	if (inactive) {
+		WARN(true, "Failed to collect inactive req R=%08x\n",
+		     rreq->debug_id);
+		cond_resched();
+	}
+	return 2; /* Again */
+}
+
+/*
+ * Wait for a request to complete, successfully or otherwise.
+ */
+static ssize_t netfs_wait_for_request(struct netfs_io_request *rreq,
+				      bool (*collector)(struct netfs_io_request *rreq))
+{
+	DEFINE_WAIT(myself);
+	ssize_t ret;
+
+	for (;;) {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+			switch (netfs_collect_in_app(rreq, collector)) {
+			case 0:
+				break;
+			case 1:
+				goto all_collected;
+			case 2:
+				continue;
+			}
+		}
+
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+			break;
+
+		schedule();
+		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+	}
+
+all_collected:
+	finish_wait(&rreq->waitq, &myself);
+
+	ret = rreq->error;
+	if (ret == 0) {
+		ret = rreq->transferred;
+		switch (rreq->origin) {
+		case NETFS_DIO_READ:
+		case NETFS_DIO_WRITE:
+		case NETFS_READ_SINGLE:
+		case NETFS_UNBUFFERED_WRITE:
+			break;
+		default:
+			if (rreq->submitted < rreq->len) {
+				trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+				ret = -EIO;
+			}
+			break;
+		}
+	}
+
+	return ret;
+}
+
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_request(rreq, netfs_read_collection);
+}
+
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_request(rreq, netfs_write_collection);
+}
+
+/*
+ * Wait for a paused operation to unpause or complete in some manner.
+ */
+static void netfs_wait_for_pause(struct netfs_io_request *rreq,
+				 bool (*collector)(struct netfs_io_request *rreq))
+{
+	DEFINE_WAIT(myself);
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
+
+	for (;;) {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+			switch (netfs_collect_in_app(rreq, collector)) {
+			case 0:
+				break;
+			case 1:
+				goto all_collected;
+			case 2:
+				continue;
+			}
+		}
+
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
+		    !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+			break;
+
+		schedule();
+		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+	}
+
+all_collected:
+	finish_wait(&rreq->waitq, &myself);
+}
+
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_pause(rreq, netfs_read_collection);
+}
+
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_pause(rreq, netfs_write_collection);
+}
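
On the issuing side, completion paths are expected to funnel through netfs_subreq_clear_in_progress() rather than open-coding the bit-clear and wakeup. A minimal sketch under that assumption (the handler name is hypothetical, not from this commit):

	/* Sketch only: a filesystem's subrequest completion callback. */
	static void example_subreq_terminated(struct netfs_io_subrequest *subreq, int error)
	{
		subreq->error = error;
		/* Clears NETFS_SREQ_IN_PROGRESS and wakes the collector if this
		 * subrequest heads its stream or a retry is in progress.
		 */
		netfs_subreq_clear_in_progress(subreq);
	}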
