|
8 | 8 | #include <linux/swap.h>
|
9 | 9 | #include "internal.h"
|
10 | 10 |
|
/**
 * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
 * @mapping: Address space to set on the folio (or NULL).
 * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
 * @_cur_size: Current size of the buffer (updated).
 * @size: Target size of the buffer.
 * @gfp: The allocation constraints.
 *
 * Extend the folio queue in *@_buffer with freshly allocated folios until
 * *@_cur_size reaches @size (rounded up to a multiple of PAGE_SIZE).  Folios
 * already in the queue are kept; only the shortfall is allocated.  Each folio
 * appended here is marked in its slot so that netfs_free_folioq_buffer() will
 * know to put it.
 *
 * Return: 0 on success or -ENOMEM if a folio or folio_queue segment could not
 * be allocated.  On failure, anything allocated so far is left attached to
 * *@_buffer for the caller to free.
 */
int netfs_alloc_folioq_buffer(struct address_space *mapping,
			      struct folio_queue **_buffer,
			      size_t *_cur_size, ssize_t size, gfp_t gfp)
{
	struct folio_queue *tail = *_buffer, *p;

	size = round_up(size, PAGE_SIZE);
	if (*_cur_size >= size)
		return 0;

	/* Walk to the last segment of the existing queue, if any. */
	if (tail)
		while (tail->next)
			tail = tail->next;

	do {
		struct folio *folio;
		int order = 0, slot;

		/* Start a new segment when the queue is empty or the tail is
		 * full.
		 * NOTE(review): the segment is allocated with GFP_NOFS rather
		 * than @gfp — presumably deliberate to avoid fs reclaim
		 * recursion; confirm.
		 */
		if (!tail || folioq_full(tail)) {
			p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
			if (!p)
				return -ENOMEM;
			if (tail) {
				tail->next = p;
				p->prev = tail;
			} else {
				*_buffer = p;
			}
			tail = p;
		}

		/* Try one large folio covering the remaining shortfall, capped
		 * at the maximum pagecache order.
		 */
		if (size - *_cur_size > PAGE_SIZE)
			order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
				     MAX_PAGECACHE_ORDER);

		folio = folio_alloc(gfp, order);
		if (!folio && order > 0)
			folio = folio_alloc(gfp, 0); /* Fall back to a single page. */
		if (!folio)
			return -ENOMEM;

		folio->mapping = mapping;
		folio->index = *_cur_size / PAGE_SIZE;
		trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
		/* Mark the slot so netfs_free_folioq_buffer() puts this folio. */
		slot = folioq_append_mark(tail, folio);
		*_cur_size += folioq_folio_size(tail, slot);
	} while (*_cur_size < size);

	return 0;
}
EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
| 70 | + |
| 71 | +/** |
| 72 | + * netfs_free_folioq_buffer - Free a folio queue. |
| 73 | + * @fq: The start of the folio queue to free |
| 74 | + * |
| 75 | + * Free up a chain of folio_queues and, if marked, the marked folios they point |
| 76 | + * to. |
| 77 | + */ |
| 78 | +void netfs_free_folioq_buffer(struct folio_queue *fq) |
| 79 | +{ |
| 80 | + struct folio_queue *next; |
| 81 | + struct folio_batch fbatch; |
| 82 | + |
| 83 | + folio_batch_init(&fbatch); |
| 84 | + |
| 85 | + for (; fq; fq = next) { |
| 86 | + for (int slot = 0; slot < folioq_count(fq); slot++) { |
| 87 | + struct folio *folio = folioq_folio(fq, slot); |
| 88 | + |
| 89 | + if (!folio || |
| 90 | + !folioq_is_marked(fq, slot)) |
| 91 | + continue; |
| 92 | + |
| 93 | + trace_netfs_folio(folio, netfs_folio_trace_put); |
| 94 | + if (folio_batch_add(&fbatch, folio)) |
| 95 | + folio_batch_release(&fbatch); |
| 96 | + } |
| 97 | + |
| 98 | + netfs_stat_d(&netfs_n_folioq); |
| 99 | + next = fq->next; |
| 100 | + kfree(fq); |
| 101 | + } |
| 102 | + |
| 103 | + folio_batch_release(&fbatch); |
| 104 | +} |
| 105 | +EXPORT_SYMBOL(netfs_free_folioq_buffer); |
| 106 | + |
11 | 107 | /*
|
12 | 108 | * Reset the subrequest iterator to refer just to the region remaining to be
|
13 | 109 | * read. The iterator may or may not have been advanced by socket ops or
|
|
0 commit comments