|
 #include <linux/swap.h>
 #include "internal.h"

+/*
+ * Attach a folio to the buffer and maybe set marks on it to say that we need
+ * to put the folio later and twiddle the pagecache flags.
+ */
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+			    struct folio *folio, unsigned int flags,
+			    gfp_t gfp_mask)
+{
+	XA_STATE_ORDER(xas, xa, index, folio_order(folio));
+
+retry:
+	xas_lock(&xas);
+	for (;;) {
+		xas_store(&xas, folio);
+		if (!xas_error(&xas))
+			break;
+		xas_unlock(&xas);
+		if (!xas_nomem(&xas, gfp_mask))
+			return xas_error(&xas);
+		goto retry;
+	}
+
+	if (flags & NETFS_FLAG_PUT_MARK)
+		xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
+	if (flags & NETFS_FLAG_PAGECACHE_MARK)
+		xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
+	xas_unlock(&xas);
+	return xas_error(&xas);
+}
+
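/*
 * Editor's note: a minimal usage sketch, not part of this patch.  It assumes
 * a caller that has already allocated a folio and wants it attached to a
 * request's scratch xarray with a "put later" mark, so that a subsequent
 * netfs_clear_buffer() drops the reference.  The function name and the
 * GFP_KERNEL flags below are illustrative assumptions.
 */
static int example_attach_folio(struct xarray *buffer, pgoff_t index,
				struct folio *folio)
{
	/* Store the folio at @index and mark it for putting at teardown. */
	return netfs_xa_store_and_mark(buffer, index, folio,
				       NETFS_FLAG_PUT_MARK, GFP_KERNEL);
}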
+/*
+ * Create the specified range of folios in the buffer attached to the read
+ * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know
+ * that these need freeing later.
+ */
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+			       struct address_space *mapping,
+			       pgoff_t index, pgoff_t to, gfp_t gfp_mask)
+{
+	struct folio *folio;
+	int ret;
+
+	if (to + 1 == index) /* Page range is inclusive */
+		return 0;
+
+	do {
+		/* TODO: Figure out what order folio can be allocated here */
+		folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
+		if (!folio)
+			return -ENOMEM;
+		folio->index = index;
+		ret = netfs_xa_store_and_mark(buffer, index, folio,
+					      NETFS_FLAG_PUT_MARK, gfp_mask);
+		if (ret < 0) {
+			folio_put(folio);
+			return ret;
+		}
+
+		index += folio_nr_pages(folio);
+	} while (index <= to && index != 0);
+
+	return 0;
+}
+
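/*
 * Editor's note: an illustrative sketch, not part of this patch.  Both ends
 * of the range are inclusive, so indices 0..3 here yield four order-0
 * folios; on failure the caller is expected to tear the buffer down itself
 * (see netfs_clear_buffer() below).  The function name, mapping and range
 * are assumptions made up for the example.
 */
static int example_fill_buffer(struct xarray *buffer,
			       struct address_space *mapping)
{
	/* Allocate and attach folios covering page indices 0, 1, 2 and 3. */
	return netfs_add_folios_to_buffer(buffer, mapping, 0, 3, GFP_KERNEL);
}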
+/*
+ * Clear an xarray buffer, putting a ref on the folios that have
+ * NETFS_BUF_PUT_MARK set.
+ */
+void netfs_clear_buffer(struct xarray *buffer)
+{
+	struct folio *folio;
+	XA_STATE(xas, buffer, 0);
+
+	rcu_read_lock();
+	xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
+		folio_put(folio);
+	}
+	rcu_read_unlock();
+	xa_destroy(buffer);
+}
+
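/*
 * Editor's note: an end-to-end sketch, not part of this patch, showing the
 * intended lifecycle: populate a scratch buffer, use it, then tear it down.
 * Only folios bearing NETFS_BUF_PUT_MARK have their references dropped by
 * netfs_clear_buffer(), which is why netfs_add_folios_to_buffer() sets that
 * mark on everything it allocates.  All names here are illustrative.
 */
static int example_buffer_lifecycle(struct address_space *mapping)
{
	struct xarray buffer;
	int ret;

	xa_init(&buffer);
	ret = netfs_add_folios_to_buffer(&buffer, mapping, 0, 7, GFP_KERNEL);
	if (ret == 0) {
		/* ... read into / copy out of the buffer here ... */
	}
	netfs_clear_buffer(&buffer);	/* Puts marked folios, empties tree */
	return ret;
}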
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.