Skip to content

Commit 4fcccc3

Browse files
committed
netfs: Make the refcounting of netfs_begin_read() easier to use
Make the refcounting of netfs_begin_read() easier to use by not eating the caller's ref on the netfs_io_request it's given. This makes it easier to use when we need to look in the request struct after. Signed-off-by: David Howells <[email protected]> Reviewed-by: Jeff Layton <[email protected]> cc: [email protected] cc: [email protected] cc: [email protected]
1 parent 6ba22d8 commit 4fcccc3

File tree

3 files changed

+23
-20
lines changed

3 files changed

+23
-20
lines changed

fs/netfs/buffered_read.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ void netfs_readahead(struct readahead_control *ractl)
210210
;
211211

212212
netfs_begin_read(rreq, false);
213+
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
213214
return;
214215

215216
cleanup_free:
@@ -260,7 +261,9 @@ int netfs_read_folio(struct file *file, struct folio *folio)
260261
iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
261262
rreq->start, rreq->len);
262263

263-
return netfs_begin_read(rreq, true);
264+
ret = netfs_begin_read(rreq, true);
265+
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
266+
return ret;
264267

265268
discard:
266269
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
@@ -429,6 +432,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
429432
ret = netfs_begin_read(rreq, true);
430433
if (ret < 0)
431434
goto error;
435+
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
432436

433437
have_folio:
434438
ret = folio_wait_fscache_killable(folio);

fs/netfs/io.c

Lines changed: 13 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -362,6 +362,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
362362

363363
netfs_rreq_unlock_folios(rreq);
364364

365+
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
365366
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
366367
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
367368

@@ -657,20 +658,17 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
657658

658659
if (rreq->len == 0) {
659660
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
660-
netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
661661
return -EIO;
662662
}
663663

664664
rreq->io_iter = rreq->iter;
665665

666666
INIT_WORK(&rreq->work, netfs_rreq_work);
667667

668-
if (sync)
669-
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
670-
671668
/* Chop the read into slices according to what the cache and the netfs
672669
* want and submit each one.
673670
*/
671+
netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
674672
atomic_set(&rreq->nr_outstanding, 1);
675673
io_iter = rreq->io_iter;
676674
do {
@@ -680,25 +678,25 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
680678
} while (rreq->submitted < rreq->len);
681679

682680
if (sync) {
683-
/* Keep nr_outstanding incremented so that the ref always belongs to
684-
* us, and the service code isn't punted off to a random thread pool to
685-
* process.
681+
/* Keep nr_outstanding incremented so that the ref always
682+
* belongs to us, and the service code isn't punted off to a
683+
* random thread pool to process. Note that this might start
684+
* further work, such as writing to the cache.
686685
*/
687-
for (;;) {
688-
wait_var_event(&rreq->nr_outstanding,
689-
atomic_read(&rreq->nr_outstanding) == 1);
686+
wait_var_event(&rreq->nr_outstanding,
687+
atomic_read(&rreq->nr_outstanding) == 1);
688+
if (atomic_dec_and_test(&rreq->nr_outstanding))
690689
netfs_rreq_assess(rreq, false);
691-
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
692-
break;
693-
cond_resched();
694-
}
690+
691+
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
692+
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
693+
TASK_UNINTERRUPTIBLE);
695694

696695
ret = rreq->error;
697696
if (ret == 0 && rreq->submitted < rreq->len) {
698697
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
699698
ret = -EIO;
700699
}
701-
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
702700
} else {
703701
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
704702
if (atomic_dec_and_test(&rreq->nr_outstanding))

include/trace/events/netfs.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,9 @@
3434
EM(netfs_rreq_trace_free, "FREE ") \
3535
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
3636
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
37-
E_(netfs_rreq_trace_unmark, "UNMARK ")
37+
EM(netfs_rreq_trace_unmark, "UNMARK ") \
38+
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
39+
E_(netfs_rreq_trace_wake_ip, "WAKE-IP")
3840

3941
#define netfs_sreq_sources \
4042
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
@@ -65,14 +67,13 @@
6567
E_(netfs_fail_prepare_write, "prep-write")
6668

6769
#define netfs_rreq_ref_traces \
68-
EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
70+
EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
6971
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
7072
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
7173
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
7274
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
73-
EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
75+
EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
7476
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
75-
EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
7677
E_(netfs_rreq_trace_new, "NEW ")
7778

7879
#define netfs_sreq_ref_traces \

0 commit comments

Comments (0)