Skip to content

Commit e61bfaa

Browse files
dhowells authored and brauner committed
netfs: Add functions to build/clean a buffer in a folio_queue
Add two netfslib functions to build up or clean up a buffer in a folio_queue. The first, netfs_alloc_folioq_buffer() will add folios to a buffer, extending up at least to the given size. If it can, it will add multipage folios. The folios are optionally have the mapping set and will have the index set according to the distance from the front of the folio queue. The second function will free up a folio queue and put any folios in the queue that have the first mark set. The netfs_folio tracepoint is also altered to cope with folios that have a NULL mapping, and the folios being added/put will have trace lines emitted and will be accounted in the stats. Signed-off-by: David Howells <[email protected]> Link: https://lore.kernel.org/r/[email protected] cc: Jeff Layton <[email protected]> cc: Marc Dionne <[email protected]> cc: [email protected] cc: [email protected] cc: [email protected] Signed-off-by: Christian Brauner <[email protected]>
1 parent 9e70501 commit e61bfaa

File tree

3 files changed

+104
-4
lines changed

3 files changed

+104
-4
lines changed

fs/netfs/misc.c

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,102 @@
88
#include <linux/swap.h>
99
#include "internal.h"
1010

11+
/**
 * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
 * @mapping: Address space to set on the folio (or NULL).
 * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
 * @_cur_size: Current size of the buffer (updated).
 * @size: Target size of the buffer.
 * @gfp: The allocation constraints.
 *
 * Extend the folio queue at *@_buffer with freshly allocated folios until the
 * buffer covers at least @size bytes (@size is rounded up to a whole page
 * first).  Multipage folios are tried where the remaining shortfall allows.
 * Each folio added is marked in its queue segment and has its ->index set
 * from its page offset within the buffer; @mapping is set on it as given.
 *
 * Return: 0 on success or -ENOMEM if a folio or a queue segment could not be
 * allocated (any folios already added remain in the queue).
 */
int netfs_alloc_folioq_buffer(struct address_space *mapping,
			      struct folio_queue **_buffer,
			      size_t *_cur_size, ssize_t size, gfp_t gfp)
{
	struct folio_queue *tail = *_buffer, *p;

	size = round_up(size, PAGE_SIZE);
	if (*_cur_size >= size)
		return 0;

	/* Appending always happens at the last segment of the chain. */
	if (tail)
		while (tail->next)
			tail = tail->next;

	do {
		struct folio *folio;
		int order = 0, slot;

		/* Chain on a new queue segment when the tail is full (or the
		 * queue is empty).
		 */
		if (!tail || folioq_full(tail)) {
			/* NOTE(review): segment allocation uses GFP_NOFS
			 * rather than @gfp — confirm this is intentional.
			 */
			p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
			if (!p)
				return -ENOMEM;
			if (tail) {
				tail->next = p;
				p->prev = tail;
			} else {
				*_buffer = p;
			}
			tail = p;
		}

		/* Try a multipage folio sized to the remaining shortfall,
		 * capped at the pagecache maximum order.
		 */
		if (size - *_cur_size > PAGE_SIZE)
			order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
				     MAX_PAGECACHE_ORDER);

		folio = folio_alloc(gfp, order);
		if (!folio && order > 0)
			folio = folio_alloc(gfp, 0); /* Fall back to one page. */
		if (!folio)
			return -ENOMEM;

		folio->mapping = mapping;
		folio->index = *_cur_size / PAGE_SIZE;
		trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
		/* Appended with the mark set; the mark denotes ownership so
		 * that netfs_free_folioq_buffer() knows to put the folio.
		 */
		slot = folioq_append_mark(tail, folio);
		*_cur_size += folioq_folio_size(tail, slot);
	} while (*_cur_size < size);

	return 0;
}
EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
70+
71+
/**
72+
* netfs_free_folioq_buffer - Free a folio queue.
73+
* @fq: The start of the folio queue to free
74+
*
75+
* Free up a chain of folio_queues and, if marked, the marked folios they point
76+
* to.
77+
*/
78+
void netfs_free_folioq_buffer(struct folio_queue *fq)
79+
{
80+
struct folio_queue *next;
81+
struct folio_batch fbatch;
82+
83+
folio_batch_init(&fbatch);
84+
85+
for (; fq; fq = next) {
86+
for (int slot = 0; slot < folioq_count(fq); slot++) {
87+
struct folio *folio = folioq_folio(fq, slot);
88+
89+
if (!folio ||
90+
!folioq_is_marked(fq, slot))
91+
continue;
92+
93+
trace_netfs_folio(folio, netfs_folio_trace_put);
94+
if (folio_batch_add(&fbatch, folio))
95+
folio_batch_release(&fbatch);
96+
}
97+
98+
netfs_stat_d(&netfs_n_folioq);
99+
next = fq->next;
100+
kfree(fq);
101+
}
102+
103+
folio_batch_release(&fbatch);
104+
}
105+
EXPORT_SYMBOL(netfs_free_folioq_buffer);
106+
11107
/*
12108
* Reset the subrequest iterator to refer just to the region remaining to be
13109
* read. The iterator may or may not have been advanced by socket ops or

include/linux/netfs.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -457,6 +457,12 @@ struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
457457
void netfs_folioq_free(struct folio_queue *folioq,
458458
unsigned int trace /*enum netfs_trace_folioq*/);
459459

460+
/* Buffer wrangling helpers API.
 * netfs_alloc_folioq_buffer() grows a folio_queue buffer to at least @size;
 * netfs_free_folioq_buffer() frees the queue chain, putting marked folios.
 */
int netfs_alloc_folioq_buffer(struct address_space *mapping,
			      struct folio_queue **_buffer,
			      size_t *_cur_size, ssize_t size, gfp_t gfp);
void netfs_free_folioq_buffer(struct folio_queue *fq);
465+
460466
/**
461467
* netfs_inode - Get the netfs inode context from the inode
462468
* @inode: The inode to query

include/trace/events/netfs.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,7 @@
155155
EM(netfs_streaming_filled_page, "mod-streamw-f") \
156156
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
157157
EM(netfs_folio_trace_abandon, "abandon") \
158+
EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \
158159
EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
159160
EM(netfs_folio_trace_cancel_store, "cancel-store") \
160161
EM(netfs_folio_trace_clear, "clear") \
@@ -195,10 +196,7 @@
195196
E_(netfs_trace_donate_to_deferred_next, "defer-next")
196197

197198
#define netfs_folioq_traces \
198-
EM(netfs_trace_folioq_alloc_append_folio, "alloc-apf") \
199-
EM(netfs_trace_folioq_alloc_read_prep, "alloc-r-prep") \
200-
EM(netfs_trace_folioq_alloc_read_prime, "alloc-r-prime") \
201-
EM(netfs_trace_folioq_alloc_read_sing, "alloc-r-sing") \
199+
EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \
202200
EM(netfs_trace_folioq_clear, "clear") \
203201
EM(netfs_trace_folioq_delete, "delete") \
204202
EM(netfs_trace_folioq_make_space, "make-space") \

0 commit comments

Comments
 (0)