 #include <linux/fscache.h>
 #include "internal.h"
 
+static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
+					     loff_t start, size_t len)
+{
+	struct netfs_io_request *rreq;
+
+	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+	if (!rreq)
+		return ERR_PTR(-ENOMEM);
+
+	rreq->start = start;
+	rreq->len = len;
+	rreq->mapping = mapping;
+	INIT_LIST_HEAD(&rreq->subrequests);
+	refcount_set(&rreq->ref, 1);
+	return rreq;
+}
+
+static void erofs_fscache_put_request(struct netfs_io_request *rreq)
+{
+	if (!refcount_dec_and_test(&rreq->ref))
+		return;
+	if (rreq->cache_resources.ops)
+		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+	kfree(rreq);
+}
+
+static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
+{
+	if (!refcount_dec_and_test(&subreq->ref))
+		return;
+	erofs_fscache_put_request(subreq->rreq);
+	kfree(subreq);
+}
+
+static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+
+	while (!list_empty(&rreq->subrequests)) {
+		subreq = list_first_entry(&rreq->subrequests,
+				struct netfs_io_subrequest, rreq_link);
+		list_del(&subreq->rreq_link);
+		erofs_fscache_put_subrequest(subreq);
+	}
+}
+
+static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	unsigned int iopos = 0;
+	pgoff_t start_page = rreq->start / PAGE_SIZE;
+	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	bool subreq_failed = false;
+
+	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+	subreq = list_first_entry(&rreq->subrequests,
+				  struct netfs_io_subrequest, rreq_link);
+	subreq_failed = (subreq->error < 0);
+
+	rcu_read_lock();
+	xas_for_each(&xas, folio, last_page) {
+		unsigned int pgpos =
+			(folio_index(folio) - start_page) * PAGE_SIZE;
+		unsigned int pgend = pgpos + folio_size(folio);
+		bool pg_failed = false;
+
+		for (;;) {
+			if (!subreq) {
+				pg_failed = true;
+				break;
+			}
+
+			pg_failed |= subreq_failed;
+			if (pgend < iopos + subreq->len)
+				break;
+
+			iopos += subreq->len;
+			if (!list_is_last(&subreq->rreq_link,
+					  &rreq->subrequests)) {
+				subreq = list_next_entry(subreq, rreq_link);
+				subreq_failed = (subreq->error < 0);
+			} else {
+				subreq = NULL;
+				subreq_failed = false;
+			}
+			if (pgend == iopos)
+				break;
+		}
+
+		if (!pg_failed)
+			folio_mark_uptodate(folio);
+
+		folio_unlock(folio);
+	}
+	rcu_read_unlock();
+}
+
+static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
+{
+	erofs_fscache_rreq_unlock_folios(rreq);
+	erofs_fscache_clear_subrequests(rreq);
+	erofs_fscache_put_request(rreq);
+}
+
+static void erofc_fscache_subreq_complete(void *priv,
+		ssize_t transferred_or_error, bool was_async)
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error))
+		subreq->error = transferred_or_error;
+
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
+	erofs_fscache_put_subrequest(subreq);
+}
+
 /*
  * Read data from fscache and fill the read data into page cache described by
- * @start/len, which shall be both aligned with PAGE_SIZE. @pstart describes
+ * @rreq, whose start/len shall both be aligned with PAGE_SIZE. @pstart describes
  * the start physical address in the cache file.
  */
-static int erofs_fscache_read_folios(struct fscache_cookie *cookie,
-				     struct address_space *mapping,
-				     loff_t start, size_t len,
-				     loff_t pstart)
+static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
+				struct netfs_io_request *rreq, loff_t pstart)
 {
 	enum netfs_io_source source;
-	struct netfs_io_request rreq = {};
-	struct netfs_io_subrequest subreq = { .rreq = &rreq, };
-	struct netfs_cache_resources *cres = &rreq.cache_resources;
-	struct super_block *sb = mapping->host->i_sb;
+	struct super_block *sb = rreq->mapping->host->i_sb;
+	struct netfs_io_subrequest *subreq;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
 	struct iov_iter iter;
+	loff_t start = rreq->start;
+	size_t len = rreq->len;
 	size_t done = 0;
 	int ret;
 
+	atomic_set(&rreq->nr_outstanding, 1);
+
 	ret = fscache_begin_read_operation(cres, cookie);
 	if (ret)
-		return ret;
+		goto out;
 
 	while (done < len) {
-		subreq.start = pstart + done;
-		subreq.len = len - done;
-		subreq.flags = 1 << NETFS_SREQ_ONDEMAND;
+		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
+				 GFP_KERNEL);
+		if (subreq) {
+			INIT_LIST_HEAD(&subreq->rreq_link);
+			refcount_set(&subreq->ref, 2);
+			subreq->rreq = rreq;
+			refcount_inc(&rreq->ref);
+		} else {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		subreq->start = pstart + done;
+		subreq->len = len - done;
+		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
 
-		source = cres->ops->prepare_read(&subreq, LLONG_MAX);
-		if (WARN_ON(subreq.len == 0))
+		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+		source = cres->ops->prepare_read(subreq, LLONG_MAX);
+		if (WARN_ON(subreq->len == 0))
 			source = NETFS_INVALID_READ;
 		if (source != NETFS_READ_FROM_CACHE) {
 			erofs_err(sb, "failed to fscache prepare_read (source %d)",
 				  source);
 			ret = -EIO;
+			subreq->error = ret;
+			erofs_fscache_put_subrequest(subreq);
 			goto out;
 		}
 
-		iov_iter_xarray(&iter, READ, &mapping->i_pages,
-				start + done, subreq.len);
-		ret = fscache_read(cres, subreq.start, &iter,
-				   NETFS_READ_HOLE_FAIL, NULL, NULL);
+		atomic_inc(&rreq->nr_outstanding);
+
+		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+				start + done, subreq->len);
+
+		ret = fscache_read(cres, subreq->start, &iter,
+				   NETFS_READ_HOLE_FAIL,
+				   erofc_fscache_subreq_complete, subreq);
+		if (ret == -EIOCBQUEUED)
+			ret = 0;
 		if (ret) {
 			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
 			goto out;
 		}
 
-		done += subreq.len;
+		done += subreq->len;
 	}
 out:
-	fscache_end_operation(cres);
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
 	return ret;
 }
 
@@ -64,6 +210,7 @@ static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
 	int ret;
 	struct folio *folio = page_folio(page);
 	struct super_block *sb = folio_mapping(folio)->host->i_sb;
+	struct netfs_io_request *rreq;
 	struct erofs_map_dev mdev = {
 		.m_deviceid = 0,
 		.m_pa = folio_pos(folio),
@@ -73,11 +220,13 @@ static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
 	if (ret)
 		goto out;
 
-	ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-			folio_mapping(folio), folio_pos(folio),
-			folio_size(folio), mdev.m_pa);
-	if (!ret)
-		folio_mark_uptodate(folio);
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+				folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq))
+		goto out;
+
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+				rreq, mdev.m_pa);
 out:
 	folio_unlock(folio);
 	return ret;
@@ -117,6 +266,7 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	struct super_block *sb = inode->i_sb;
 	struct erofs_map_blocks map;
 	struct erofs_map_dev mdev;
+	struct netfs_io_request *rreq;
 	erofs_off_t pos;
 	loff_t pstart;
 	int ret;
@@ -149,10 +299,15 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	if (ret)
 		goto out_unlock;
 
+
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+				folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq))
+		goto out_unlock;
+
 	pstart = mdev.m_pa + (pos - map.m_la);
-	ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-			folio_mapping(folio), folio_pos(folio),
-			folio_size(folio), pstart);
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+			rreq, pstart);
 
 out_uptodate:
 	if (!ret)
@@ -162,15 +317,16 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	return ret;
 }
 
-static void erofs_fscache_unlock_folios(struct readahead_control *rac,
-					size_t len)
+static void erofs_fscache_advance_folios(struct readahead_control *rac,
+					 size_t len, bool unlock)
 {
 	while (len) {
 		struct folio *folio = readahead_folio(rac);
-
 		len -= folio_size(folio);
-		folio_mark_uptodate(folio);
-		folio_unlock(folio);
+		if (unlock) {
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+		}
 	}
 }
 
@@ -192,6 +348,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 	do {
 		struct erofs_map_blocks map;
 		struct erofs_map_dev mdev;
+		struct netfs_io_request *rreq;
 
 		pos = start + done;
 		map.m_la = pos;
@@ -211,7 +368,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 					offset, count);
 			iov_iter_zero(count, &iter);
 
-			erofs_fscache_unlock_folios(rac, count);
+			erofs_fscache_advance_folios(rac, count, true);
 			ret = count;
 			continue;
 		}
@@ -237,17 +394,18 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 		if (ret)
 			return;
 
-		ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-				rac->mapping, offset, count,
-				mdev.m_pa + (pos - map.m_la));
+		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
+		if (IS_ERR(rreq))
+			return;
 		/*
-		 * For the error cases, the folios will be unlocked when
-		 * .readahead() returns.
+		 * Drop the ref of the folios here; they are unlocked in
+		 * rreq_unlock_folios() when the rreq completes.
 		 */
-		if (!ret) {
-			erofs_fscache_unlock_folios(rac, count);
+		erofs_fscache_advance_folios(rac, count, false);
+		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+					rreq, mdev.m_pa + (pos - map.m_la));
+		if (!ret)
 			ret = count;
-		}
 	} while (ret > 0 && ((done += ret) < len));
 }
 
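Taken together, the patch moves folio unlocking out of the read path and into the request completion path. For illustration only, here is a minimal caller-side sketch of the new flow, modelled on erofs_fscache_meta_readpage() in the diff above; the wrapper name example_read_one_folio is hypothetical, and the folio is assumed to be locked and already present in the page cache.

/* Hypothetical helper, mirroring erofs_fscache_meta_readpage() above. */
static int example_read_one_folio(struct fscache_cookie *cookie,
				  struct folio *folio, loff_t pstart)
{
	struct netfs_io_request *rreq;

	/* The request is created with a single reference held by the caller. */
	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
					   folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		folio_unlock(folio);
		return PTR_ERR(rreq);
	}

	/*
	 * erofs_fscache_read_folios_async() primes rreq->nr_outstanding to 1
	 * and bumps it for every fscache_read() it issues.  When the count
	 * drops back to zero, erofs_fscache_rreq_complete() marks the folios
	 * uptodate (if no subrequest failed), unlocks them and puts the
	 * request, so the caller does not unlock the folio itself.
	 */
	return erofs_fscache_read_folios_async(cookie, rreq, pstart);
}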