@@ -129,6 +129,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
+					      unsigned int *eblks,
					      bool ra)
 {
	struct inode *const inode = mapping->host;
@@ -145,8 +146,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
 
	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
-	    /* not continuous */
-	    *last_block + 1 != current_block) {
+	    (*last_block + 1 != current_block || !*eblks)) {
 submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
@@ -216,7 +216,8 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
 
-		bio = bio_alloc(GFP_NOIO, bio_max_segs(nblocks));
+		*eblks = bio_max_segs(nblocks);
+		bio = bio_alloc(GFP_NOIO, *eblks);
 
		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
@@ -229,16 +230,8 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;
-
+	--*eblks;
	*last_block = current_block;
-
-	/* shift in advance in case of it followed by too many gaps */
-	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
-		/* err should reassign to 0 after submitting */
-		err = 0;
-		goto submit_bio_out;
-	}
-
	return bio;
 
 err_out:
@@ -252,7 +245,6 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
 
	/* if updated manually, continuous pages has a gap */
	if (bio)
-submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
 }
@@ -264,23 +256,26 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
 static int erofs_raw_access_readpage(struct file *file, struct page *page)
 {
	erofs_off_t last_block;
+	unsigned int eblks;
	struct bio *bio;
 
	trace_erofs_readpage(page, true);
 
	bio = erofs_read_raw_page(NULL, page->mapping,
-				  page, &last_block, 1, false);
+				  page, &last_block, 1, &eblks, false);
 
	if (IS_ERR(bio))
		return PTR_ERR(bio);
 
-	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
+	if (bio)
+		submit_bio(bio);
	return 0;
 }
 
 static void erofs_raw_access_readahead(struct readahead_control *rac)
 {
	erofs_off_t last_block;
+	unsigned int eblks;
	struct bio *bio = NULL;
	struct page *page;
 
@@ -291,7 +286,7 @@ static void erofs_raw_access_readahead(struct readahead_control *rac)
		prefetchw(&page->flags);
 
		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
-					  readahead_count(rac), true);
+					  readahead_count(rac), &eblks, true);
 
		/* all the page errors are ignored when readahead */
		if (IS_ERR(bio)) {
@@ -305,7 +300,6 @@ static void erofs_raw_access_readahead(struct readahead_control *rac)
			put_page(page);
		}
 
-		/* the rare case (end in gaps) */
		if (bio)
			submit_bio(bio);
 }
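
For readers skimming the diff: the change makes the caller keep its own count of free bio slots (*eblks is set from bio_max_segs(nblocks) when the bio is allocated and decremented per attached block), and a bio is submitted once the next block is discontiguous or that count reaches zero, rather than peeking at bio->bi_max_vecs. Below is a minimal user-space sketch of that accounting, not kernel code; fake_bio, MAX_SEGS and submit() are invented stand-ins for illustration only.

#include <stdio.h>
#include <stddef.h>

#define MAX_SEGS 4	/* stand-in for bio_max_segs() */

struct fake_bio {
	unsigned long start;	/* first block in the current batch */
	unsigned int nsegs;	/* blocks attached so far */
};

static void submit(struct fake_bio *bio)
{
	printf("submit: blocks %lu..%lu\n", bio->start,
	       bio->start + bio->nsegs - 1);
	bio->nsegs = 0;
}

int main(void)
{
	/* discontiguous block list: 7..9, a gap, then 20..25 */
	unsigned long blocks[] = { 7, 8, 9, 20, 21, 22, 23, 24, 25 };
	struct fake_bio bio = { .nsegs = 0 };
	unsigned long last_block = 0;
	unsigned int eblks = 0;		/* slots left in the current "bio" */

	for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		unsigned long cur = blocks[i];

		/* same shape as the patched check: flush when the block is
		 * discontiguous or when no slot is left */
		if (bio.nsegs && (last_block + 1 != cur || !eblks))
			submit(&bio);

		if (!bio.nsegs) {	/* "allocate" a fresh bio */
			bio.start = cur;
			eblks = MAX_SEGS;	/* like *eblks = bio_max_segs(nblocks) */
		}

		bio.nsegs++;		/* attach the block */
		--eblks;		/* mirrors the patch's --*eblks */
		last_block = cur;
	}

	if (bio.nsegs)			/* end-of-loop flush, as in readahead */
		submit(&bio);
	return 0;
}

With MAX_SEGS set to 4, the sketch prints three submits (7..9, 20..23, 24..25): the second batch is cut short by the slot counter rather than by a gap, which is exactly the case the new "|| !*eblks" condition handles.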