@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	struct z_erofs_pcluster *pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
-	bool tag;
 
-	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (!grp)
 		return -ENOENT;
 
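The tag out-parameter of erofs_find_workgroup() goes unread in this caller, so the lookup can return just the workgroup pointer. A minimal userspace sketch of the same cleanup, with illustrative names only (this is not the erofs API):

#include <stdio.h>

struct workgroup { unsigned long index; };

/* new shape: return the object or NULL, no out-parameters */
static struct workgroup *find_workgroup(unsigned long index)
{
	static struct workgroup grp = { .index = 42 };

	return index == grp.index ? &grp : NULL;
}

int main(void)
{
	/* callers now pass only the key, mirroring the two-argument call above */
	printf("%s\n", find_workgroup(42) ? "found" : "-ENOENT");
	return 0;
}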
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	 */
 	mutex_trylock(&cl->lock);
 
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
 	if (err) {
 		mutex_unlock(&cl->lock);
 		kmem_cache_free(pcluster_cachep, pcl);
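Note the surrounding pattern this hunk preserves: the collection lock is held while the pcluster is published, and a failed registration rolls back by unlocking and freeing. A self-contained sketch of that shape under hypothetical names (pthread stands in for the kernel mutex):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct pcluster {
	pthread_mutex_t lock;
};

/* stand-in for erofs_register_workgroup(): pretend a racing thread won */
static int register_object(struct pcluster *pcl)
{
	(void)pcl;
	return -EEXIST;
}

int main(void)
{
	struct pcluster *pcl = calloc(1, sizeof(*pcl));
	int err;

	pthread_mutex_init(&pcl->lock, NULL);
	pthread_mutex_lock(&pcl->lock);	/* hold the lock while publishing */

	err = register_object(pcl);	/* the unused tag argument is gone */
	if (err) {
		pthread_mutex_unlock(&pcl->lock);	/* roll back: unlock ... */
		free(pcl);				/* ... and free, as above */
		return 1;
	}
	pthread_mutex_unlock(&pcl->lock);
	return 0;
}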
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
-				       unsigned int nr_bios, bool force_fg)
-{
-	/*
-	 * although background is preferred, no one is pending for submission.
-	 * don't issue workqueue for decompression but drop it directly instead.
-	 */
-	if (force_fg || nr_bios)
-		return false;
-
-	kvfree(q[JQ_SUBMIT]);
-	return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
 				 z_erofs_next_pcluster_t owned_head,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t uninitialized_var(last_index);
-	bool force_submit = false;
-	unsigned int nr_bios;
-
-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
-		return false;
+	unsigned int nr_bios = 0;
+	struct bio *bio = NULL;
 
-	force_submit = false;
-	bio = NULL;
-	nr_bios = 0;
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
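The comment about last_index is worth unpacking: bio starts out NULL, the only read of last_index is guarded by bio being non-NULL, and every iteration that leaves a bio open also stores to last_index before it ends. A toy model of that invariant (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	void *bio = NULL;
	unsigned long last_index;	/* intentionally not initialized */
	unsigned long pages[] = { 5, 6, 9 };	/* gap between 6 and 9 */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long cur = pages[i];

		/* safe: bio != NULL implies an earlier iteration stored last_index */
		if (bio && cur != last_index + 1) {
			printf("gap: flush bio before page %lu\n", cur);
			bio = NULL;
		}
		if (!bio)
			bio = &pages[i];	/* stands in for bio_alloc() */
		last_index = cur;		/* always set while a bio is open */
	}
	return 0;
}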
@@ -1194,67 +1172,60 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 	do {
 		struct z_erofs_pcluster *pcl;
-		unsigned int clusterpages;
-		pgoff_t first_index;
-		struct page *page;
-		unsigned int i = 0, bypass = 0;
-		int err;
+		pgoff_t cur, end;
+		unsigned int i = 0;
+		bool bypass = true;
 
 		/* no possible 'owned_head' equals the following */
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
 
 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
 
-		clusterpages = BIT(pcl->clusterbits);
+		cur = pcl->obj.index;
+		end = cur + BIT(pcl->clusterbits);
 
 		/* close the main owned chain at first */
 		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-		first_index = pcl->obj.index;
-		force_submit |= (first_index != last_index + 1);
+		do {
+			struct page *page;
+			int err;
 
-repeat:
-		page = pickup_page_for_submission(pcl, i, pagepool,
-						  MNGD_MAPPING(sbi),
-						  GFP_NOFS);
-		if (!page) {
-			force_submit = true;
-			++bypass;
-			goto skippage;
-		}
+			page = pickup_page_for_submission(pcl, i++, pagepool,
+							  MNGD_MAPPING(sbi),
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
+			if (bio && cur != last_index + 1) {
 submit_bio_retry:
-			submit_bio(bio);
-			bio = NULL;
-		}
-
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+				submit_bio(bio);
+				bio = NULL;
+			}
 
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-			++nr_bios;
-		}
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
-		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (err < PAGE_SIZE)
-			goto submit_bio_retry;
+			err = bio_add_page(bio, page, PAGE_SIZE, 0);
+			if (err < PAGE_SIZE)
+				goto submit_bio_retry;
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
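The restructured loop drops the repeat/skippage gotos and the force_submit flag in favor of a single rule: an open bio is flushed whenever the next page index is not last_index + 1. A self-contained userspace model of that batching rule, under made-up names (submit_batch() stands in for submit_bio()):

#include <stdio.h>

static void submit_batch(unsigned long start, unsigned long len)
{
	printf("bio: start=%lu len=%lu\n", start, len);
}

int main(void)
{
	/* two clusters: pages 10-13, then 20-21 (discontiguous) */
	unsigned long clusters[][2] = { { 10, 4 }, { 20, 2 } };
	unsigned long last_index = 0, batch_start = 0, batch_len = 0;
	unsigned int nr_bios = 0;

	for (unsigned int c = 0; c < 2; c++) {
		unsigned long cur = clusters[c][0];
		unsigned long end = cur + clusters[c][1];

		do {
			/* a gap forces submission of the open batch */
			if (batch_len && cur != last_index + 1) {
				submit_batch(batch_start, batch_len);
				batch_len = 0;
			}
			/* open a new batch, as bio_alloc() does above */
			if (!batch_len) {
				batch_start = cur;
				++nr_bios;
			}
			++batch_len;
			last_index = cur;
		} while (++cur < end);
	}
	if (batch_len)
		submit_batch(batch_start, batch_len);
	printf("nr_bios=%u\n", nr_bios);	/* prints 2 for this input */
	return 0;
}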
@@ -1263,11 +1234,15 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	if (bio)
 		submit_bio(bio);
 
-	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
-		return true;
-
+	/*
+	 * although background is preferred, no one is pending for submission.
+	 * don't issue workqueue for decompression but drop it directly instead.
+	 */
+	if (!*force_fg && !nr_bios) {
+		kvfree(q[JQ_SUBMIT]);
+		return;
+	}
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
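With the single-use helper folded in, the all-bypassed fast path reads top to bottom: no bios in flight and no caller demanding foreground completion means the submit queue can be freed on the spot instead of kicking a workqueue. A small model of the decision, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void submit_done(void *submit_q, bool force_fg, unsigned int nr_bios)
{
	/* although background is preferred, no one is pending for submission */
	if (!force_fg && !nr_bios) {
		free(submit_q);		/* drop it directly instead */
		return;
	}
	/* in the real code, queue ownership passes to the kickoff path here */
	printf("kickoff: fg=%d nr_bios=%u\n", force_fg, nr_bios);
}

int main(void)
{
	submit_done(malloc(16), false, 0);	/* nothing queued: freed, no kickoff */
	submit_done(NULL, true, 0);		/* foreground waiters: always kick off */
	return 0;
}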
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
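The empty-chain test that used to hide inside z_erofs_submit_queue() now sits in its only caller, so the submit path needs no boolean result at all. A sketch of the new control flow, with illustrative names and sentinel:

#include <stdio.h>

#define PCLUSTER_TAIL ((void *)0x1)	/* illustrative sentinel for an empty chain */

static void submit_queue(void *owned_head)
{
	printf("submit chain %p\n", owned_head);
}

static void runqueue(void *owned_head)
{
	if (owned_head == PCLUSTER_TAIL)	/* the check now lives in the caller */
		return;
	submit_queue(owned_head);		/* unconditional: no bool to test */
	printf("handle bypass queue\n");
}

int main(void)
{
	runqueue(PCLUSTER_TAIL);	/* no-op */
	runqueue((void *)0x2);		/* pretend chain head */
	return 0;
}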