
Commit f2151df

lostjeffle authored and hsiangkao committed
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data read from fscache is put directly into the target page cache. As support for compressed data in fscache mode is going to be introduced, rework the fscache internals so that the upcoming compressed part can direct the raw data read from fscache to the target buffer it wants, decompress the raw data, and finally fill the page cache with the decompressed data.

As the first step, introduce a new structure, erofs_fscache_io (io), to describe a generic read request from fscache; the caller specifies the target buffer it wants through the iov_iter structure (io->iter). Besides, the caller can also specify its completion callback and private data through erofs_fscache_io; the callback is invoked for further handling, e.g. unlocking the page cache for uncompressed data or decompressing the raw data just read, when the read request from fscache completes.

Now erofs_fscache_read_io_async() serves as a generic interface for reading raw data from fscache for both compressed and uncompressed data. The erofs_fscache_rq structure is kept to describe a request to fill the page cache in the specified range.

Signed-off-by: Jingbo Xu <[email protected]>
Reviewed-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Gao Xiang <[email protected]>
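The call pattern this establishes, condensed from the diff below, looks roughly like the following C sketch. The helper name read_into_page_cache and its argument list are illustrative only (not part of the patch), and error handling is trimmed; the erofs_fscache_* helpers it calls are the ones introduced by this commit.

/* Sketch only, condensed from the patch; not a verbatim excerpt. */
static int read_into_page_cache(struct erofs_fscache_rq *req,
				struct fscache_cookie *cookie,
				loff_t pstart, loff_t pos, size_t count)
{
	/*
	 * io carries the raw-read state: io->iter (target buffer),
	 * io->end_io (completion callback) and io->private (callback data).
	 */
	struct erofs_fscache_io *io = erofs_fscache_req_io_alloc(req);
	int ret;

	if (!io)
		return -ENOMEM;

	/*
	 * For uncompressed data the target buffer is the page cache itself;
	 * a compressed reader would instead point io->iter at its own
	 * bounce buffer and decompress in its completion callback.
	 */
	iov_iter_xarray(&io->iter, ITER_DEST, &req->mapping->i_pages,
			pos, count);

	ret = erofs_fscache_read_io_async(cookie, pstart, io);
	erofs_fscache_req_io_put(io);	/* drop the submitter's reference */
	return ret;
}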
1 parent 0f28be6 commit f2151df

File tree: 1 file changed (+123, -112 lines)

fs/erofs/fscache.c

Lines changed: 123 additions & 112 deletions
@@ -25,9 +25,15 @@ static struct file_system_type erofs_anon_fs_type = {
 	.kill_sb	= kill_anon_super,
 };
 
-struct erofs_fscache_request {
-	struct erofs_fscache_request *primary;
-	struct netfs_cache_resources cache_resources;
+struct erofs_fscache_io {
+	struct netfs_cache_resources cres;
+	struct iov_iter iter;
+	netfs_io_terminated_t end_io;
+	void *private;
+	refcount_t ref;
+};
+
+struct erofs_fscache_rq {
 	struct address_space *mapping;	/* The mapping being accessed */
 	loff_t start;			/* Start position */
 	size_t len;			/* Length of the request */
@@ -36,44 +42,17 @@ struct erofs_fscache_request {
 	refcount_t ref;
 };
 
-static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping,
-					     loff_t start, size_t len)
-{
-	struct erofs_fscache_request *req;
-
-	req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	req->mapping = mapping;
-	req->start = start;
-	req->len = len;
-	refcount_set(&req->ref, 1);
-
-	return req;
-}
-
-static struct erofs_fscache_request *erofs_fscache_req_chain(struct erofs_fscache_request *primary,
-					     size_t len)
+static bool erofs_fscache_io_put(struct erofs_fscache_io *io)
 {
-	struct erofs_fscache_request *req;
-
-	/* use primary request for the first submission */
-	if (!primary->submitted) {
-		refcount_inc(&primary->ref);
-		return primary;
-	}
-
-	req = erofs_fscache_req_alloc(primary->mapping,
-			primary->start + primary->submitted, len);
-	if (!IS_ERR(req)) {
-		req->primary = primary;
-		refcount_inc(&primary->ref);
-	}
-	return req;
+	if (!refcount_dec_and_test(&io->ref))
+		return false;
+	if (io->cres.ops)
+		io->cres.ops->end_operation(&io->cres);
+	kfree(io);
+	return true;
 }
 
-static void erofs_fscache_req_complete(struct erofs_fscache_request *req)
+static void erofs_fscache_req_complete(struct erofs_fscache_rq *req)
 {
 	struct folio *folio;
 	bool failed = req->error;
@@ -93,120 +72,149 @@ static void erofs_fscache_req_complete(struct erofs_fscache_request *req)
 	rcu_read_unlock();
 }
 
-static void erofs_fscache_req_put(struct erofs_fscache_request *req)
+static void erofs_fscache_req_put(struct erofs_fscache_rq *req)
 {
-	if (refcount_dec_and_test(&req->ref)) {
-		if (req->cache_resources.ops)
-			req->cache_resources.ops->end_operation(&req->cache_resources);
-		if (!req->primary)
-			erofs_fscache_req_complete(req);
-		else
-			erofs_fscache_req_put(req->primary);
-		kfree(req);
-	}
+	if (!refcount_dec_and_test(&req->ref))
+		return;
+	erofs_fscache_req_complete(req);
+	kfree(req);
+}
+
+static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping,
+					     loff_t start, size_t len)
+{
+	struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+	req->mapping = mapping;
+	req->start = start;
+	req->len = len;
+	refcount_set(&req->ref, 1);
+	return req;
 }
 
-static void erofs_fscache_subreq_complete(void *priv,
+static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
+{
+	struct erofs_fscache_rq *req = io->private;
+
+	if (erofs_fscache_io_put(io))
+		erofs_fscache_req_put(req);
+}
+
+static void erofs_fscache_req_end_io(void *priv,
 		ssize_t transferred_or_error, bool was_async)
 {
-	struct erofs_fscache_request *req = priv;
+	struct erofs_fscache_io *io = priv;
+	struct erofs_fscache_rq *req = io->private;
 
-	if (IS_ERR_VALUE(transferred_or_error)) {
-		if (req->primary)
-			req->primary->error = transferred_or_error;
-		else
-			req->error = transferred_or_error;
-	}
-	erofs_fscache_req_put(req);
+	if (IS_ERR_VALUE(transferred_or_error))
+		req->error = transferred_or_error;
+	erofs_fscache_req_io_put(io);
+}
+
+static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req)
+{
+	struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL);
+
+	if (!io)
+		return NULL;
+	io->end_io = erofs_fscache_req_end_io;
+	io->private = req;
+	refcount_inc(&req->ref);
+	refcount_set(&io->ref, 1);
+	return io;
 }
 
 /*
- * Read data from fscache (cookie, pstart, len), and fill the read data into
- * page cache described by (req->mapping, lstart, len). @pstart describeis the
- * start physical address in the cache file.
+ * Read data from fscache described by cookie at pstart physical address
+ * offset, and fill the read data into buffer described by io->iter.
  */
-static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
-		struct erofs_fscache_request *req, loff_t pstart, size_t len)
+static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
+		loff_t pstart, struct erofs_fscache_io *io)
 {
 	enum netfs_io_source source;
-	struct super_block *sb = req->mapping->host->i_sb;
-	struct netfs_cache_resources *cres = &req->cache_resources;
-	struct iov_iter iter;
-	loff_t lstart = req->start + req->submitted;
-	size_t done = 0;
+	struct netfs_cache_resources *cres = &io->cres;
+	struct iov_iter *iter = &io->iter;
 	int ret;
 
-	DBG_BUGON(len > req->len - req->submitted);
-
 	ret = fscache_begin_read_operation(cres, cookie);
 	if (ret)
 		return ret;
 
-	while (done < len) {
-		loff_t sstart = pstart + done;
-		size_t slen = len - done;
+	while (iov_iter_count(iter)) {
+		size_t orig_count = iov_iter_count(iter), len = orig_count;
 		unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
 
 		source = cres->ops->prepare_ondemand_read(cres,
-				sstart, &slen, LLONG_MAX, &flags, 0);
-		if (WARN_ON(slen == 0))
+				pstart, &len, LLONG_MAX, &flags, 0);
+		if (WARN_ON(len == 0))
 			source = NETFS_INVALID_READ;
 		if (source != NETFS_READ_FROM_CACHE) {
-			erofs_err(sb, "failed to fscache prepare_read (source %d)", source);
+			erofs_err(NULL, "prepare_read failed (source %d)", source);
 			return -EIO;
 		}
 
-		refcount_inc(&req->ref);
-		iov_iter_xarray(&iter, ITER_DEST, &req->mapping->i_pages,
-				lstart + done, slen);
-
-		ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL,
-				   erofs_fscache_subreq_complete, req);
+		iov_iter_truncate(iter, len);
+		refcount_inc(&io->ref);
+		ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL,
+				   io->end_io, io);
 		if (ret == -EIOCBQUEUED)
 			ret = 0;
 		if (ret) {
-			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
+			erofs_err(NULL, "fscache_read failed (ret %d)", ret);
 			return ret;
 		}
+		if (WARN_ON(iov_iter_count(iter)))
+			return -EIO;
 
-		done += slen;
+		iov_iter_reexpand(iter, orig_count - len);
+		pstart += len;
 	}
-	DBG_BUGON(done != len);
 	return 0;
 }
 
 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
 {
-	int ret;
 	struct erofs_fscache *ctx = folio->mapping->host->i_private;
-	struct erofs_fscache_request *req;
+	int ret = -ENOMEM;
+	struct erofs_fscache_rq *req;
+	struct erofs_fscache_io *io;
 
 	req = erofs_fscache_req_alloc(folio->mapping,
 			folio_pos(folio), folio_size(folio));
-	if (IS_ERR(req)) {
+	if (!req) {
 		folio_unlock(folio);
-		return PTR_ERR(req);
+		return ret;
 	}
 
-	ret = erofs_fscache_read_folios_async(ctx->cookie, req,
-			folio_pos(folio), folio_size(folio));
+	io = erofs_fscache_req_io_alloc(req);
+	if (!io) {
+		req->error = ret;
+		goto out;
+	}
+	iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
+			folio_pos(folio), folio_size(folio));
+
+	ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io);
 	if (ret)
 		req->error = ret;
 
+	erofs_fscache_req_io_put(io);
+out:
 	erofs_fscache_req_put(req);
 	return ret;
 }
 
-static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
+static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
 {
-	struct address_space *mapping = primary->mapping;
+	struct address_space *mapping = req->mapping;
 	struct inode *inode = mapping->host;
 	struct super_block *sb = inode->i_sb;
-	struct erofs_fscache_request *req;
+	struct erofs_fscache_io *io;
 	struct erofs_map_blocks map;
 	struct erofs_map_dev mdev;
-	struct iov_iter iter;
-	loff_t pos = primary->start + primary->submitted;
+	loff_t pos = req->start + req->submitted;
 	size_t count;
 	int ret;
 
@@ -217,6 +225,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
 
 	if (map.m_flags & EROFS_MAP_META) {
 		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+		struct iov_iter iter;
 		erofs_blk_t blknr;
 		size_t offset, size;
 		void *src;
@@ -237,15 +246,17 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
 		}
 		iov_iter_zero(PAGE_SIZE - size, &iter);
 		erofs_put_metabuf(&buf);
-		primary->submitted += PAGE_SIZE;
+		req->submitted += PAGE_SIZE;
 		return 0;
 	}
 
-	count = primary->len - primary->submitted;
+	count = req->len - req->submitted;
 	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+		struct iov_iter iter;
+
 		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
 		iov_iter_zero(count, &iter);
-		primary->submitted += count;
+		req->submitted += count;
 		return 0;
 	}
 
@@ -260,18 +271,19 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
 	if (ret)
 		return ret;
 
-	req = erofs_fscache_req_chain(primary, count);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	io = erofs_fscache_req_io_alloc(req);
+	if (!io)
+		return -ENOMEM;
+	iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
+	ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
+			mdev.m_pa + (pos - map.m_la), io);
+	erofs_fscache_req_io_put(io);
 
-	ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
-			req, mdev.m_pa + (pos - map.m_la), count);
-	erofs_fscache_req_put(req);
-	primary->submitted += count;
+	req->submitted += count;
 	return ret;
 }
 
-static int erofs_fscache_data_read(struct erofs_fscache_request *req)
+static int erofs_fscache_data_read(struct erofs_fscache_rq *req)
 {
 	int ret;
 
@@ -280,20 +292,19 @@ static int erofs_fscache_data_read(struct erofs_fscache_request *req)
 		if (ret)
 			req->error = ret;
 	} while (!ret && req->submitted < req->len);
-
 	return ret;
 }
 
 static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
 {
-	struct erofs_fscache_request *req;
+	struct erofs_fscache_rq *req;
 	int ret;
 
 	req = erofs_fscache_req_alloc(folio->mapping,
 			folio_pos(folio), folio_size(folio));
-	if (IS_ERR(req)) {
+	if (!req) {
 		folio_unlock(folio);
-		return PTR_ERR(req);
+		return -ENOMEM;
 	}
 
 	ret = erofs_fscache_data_read(req);
@@ -303,14 +314,14 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
 
 static void erofs_fscache_readahead(struct readahead_control *rac)
 {
-	struct erofs_fscache_request *req;
+	struct erofs_fscache_rq *req;
 
 	if (!readahead_count(rac))
 		return;
 
 	req = erofs_fscache_req_alloc(rac->mapping,
 			readahead_pos(rac), readahead_length(rac));
-	if (IS_ERR(req))
+	if (!req)
 		return;
 
 	/* The request completion will drop refs on the folios. */
