@@ -214,7 +214,8 @@ struct crypt_config {
 
 	unsigned int integrity_tag_size;
 	unsigned int integrity_iv_size;
-	unsigned int on_disk_tag_size;
+	unsigned int used_tag_size;
+	unsigned int tuple_size;
 
 	/*
 	 * pool for per bio private data, crypto requests,
@@ -241,6 +242,31 @@ static unsigned int dm_crypt_clients_n;
 static volatile unsigned long dm_crypt_pages_per_client;
 #define DM_CRYPT_MEMORY_PERCENT		2
 #define DM_CRYPT_MIN_PAGES_PER_CLIENT	(BIO_MAX_VECS * 16)
+#define DM_CRYPT_DEFAULT_MAX_READ_SIZE	131072
+#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE	131072
+
+static unsigned int max_read_size = 0;
+module_param(max_read_size, uint, 0644);
+MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
+static unsigned int max_write_size = 0;
+module_param(max_write_size, uint, 0644);
+MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
+static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
+{
+	unsigned val, sector_align;
+	val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
+	if (likely(!val))
+		val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
+	if (wrt || cc->used_tag_size) {
+		if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
+			val = BIO_MAX_VECS << PAGE_SHIFT;
+	}
+	sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
+	val = round_down(val, sector_align);
+	if (unlikely(!val))
+		val = sector_align;
+	return val >> SECTOR_SHIFT;
+}
 
 static void crypt_endio(struct bio *clone);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
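get_max_request_size() above picks the byte limit (module override or default), caps it at what a single bio can carry when the data must be processed in one piece, rounds it down to the I/O alignment, and converts it to sectors. A standalone sanity check of that arithmetic (user-space C; the constants and device geometry are illustrative assumptions, not values taken from this patch):

#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SHIFT	12		/* assumes 4 KiB pages */
#define BIO_MAX_VECS	256		/* assumed bvec limit */

int main(void)
{
	unsigned val = 131072;			/* DM_CRYPT_DEFAULT_MAX_WRITE_SIZE */
	unsigned logical_block_size = 512;	/* example device geometry */
	unsigned sector_size = 4096;		/* example encryption sector size */
	unsigned sector_align = sector_size > logical_block_size ?
				sector_size : logical_block_size;

	/* writes (and reads that carry integrity tags) stay under the bio limit */
	if (val > (BIO_MAX_VECS << PAGE_SHIFT))
		val = BIO_MAX_VECS << PAGE_SHIFT;	/* 1 MiB with these values */

	val -= val % sector_align;	/* open-coded round_down(val, sector_align) */
	if (!val)
		val = sector_align;

	printf("max request: %u sectors\n", val >> SECTOR_SHIFT);	/* prints 256 */
	return 0;
}

The round-down matters when cc->sector_size exceeds the logical block size: an unaligned limit would otherwise let crypt_map() split a bio in the middle of an encryption sector.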
@@ -1151,14 +1177,14 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 	unsigned int tag_len;
 	int ret;
 
-	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
+	if (!bio_sectors(bio) || !io->cc->tuple_size)
 		return 0;
 
 	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
 	if (IS_ERR(bip))
 		return PTR_ERR(bip);
 
-	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
+	tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
 
@@ -1182,18 +1208,18 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 		return -EINVAL;
 	}
 
-	if (bi->tag_size != cc->on_disk_tag_size ||
-	    bi->tuple_size != cc->on_disk_tag_size) {
+	if (bi->tuple_size < cc->used_tag_size) {
 		ti->error = "Integrity profile tag size mismatch.";
 		return -EINVAL;
 	}
+	cc->tuple_size = bi->tuple_size;
 	if (1 << bi->interval_exp != cc->sector_size) {
 		ti->error = "Integrity profile sector size mismatch.";
 		return -EINVAL;
 	}
 
 	if (crypt_integrity_aead(cc)) {
-		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
+		cc->integrity_tag_size = cc->used_tag_size - cc->integrity_iv_size;
 		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
 			cc->integrity_tag_size, cc->integrity_iv_size);
 
@@ -1205,7 +1231,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
 			cc->integrity_iv_size);
 
-	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
+	if ((cc->integrity_tag_size + cc->integrity_iv_size) > cc->tuple_size) {
 		ti->error = "Not enough space for integrity tag in the profile.";
 		return -EINVAL;
 	}
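The two crypt_integrity_ctr() hunks above relax the old exact-match rule: the integrity profile's per-sector tuple may now be larger than the tag space requested via the integrity: option (cc->used_tag_size), and cc->tuple_size records the real stride. A minimal sketch of the resulting invariants (standalone C; the field values are made-up examples):

#include <assert.h>

int main(void)
{
	unsigned used_tag_size = 24;	/* e.g. from an "integrity:24:aead" option */
	unsigned tuple_size = 32;	/* bi->tuple_size reported by the profile */
	unsigned integrity_tag_size = 16, integrity_iv_size = 8;

	assert(tuple_size >= used_tag_size);		/* new constructor check */
	assert(integrity_tag_size + integrity_iv_size <= tuple_size);
	/* the trailing tuple_size - (tag + IV) bytes are zero-filled by the
	 * AEAD path; see the memset in crypt_convert_block_aead below */
	return 0;
}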
@@ -1284,7 +1310,7 @@ static void *tag_from_dmreq(struct crypt_config *cc,
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 
 	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
-		cc->on_disk_tag_size];
+		cc->tuple_size];
 }
 
 static void *iv_tag_from_dmreq(struct crypt_config *cc,
@@ -1365,9 +1391,9 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
 				       cc->sector_size, iv);
 		r = crypto_aead_encrypt(req);
-		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
+		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->tuple_size)
 			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
-			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
+			       cc->tuple_size - (cc->integrity_tag_size + cc->integrity_iv_size));
 	} else {
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
 				       cc->sector_size + cc->integrity_tag_size, iv);
@@ -1797,7 +1823,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 		return;
 
 	if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
-	    cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+	    cc->used_tag_size && bio_data_dir(base_bio) == READ) {
 		io->ctx.aead_recheck = true;
 		io->ctx.aead_failed = false;
 		io->error = 0;
@@ -3181,7 +3207,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
 				ti->error = "Invalid integrity arguments";
 				return -EINVAL;
 			}
-			cc->on_disk_tag_size = val;
+			cc->used_tag_size = val;
 			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
 			if (!strcasecmp(sval, "aead")) {
 				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
@@ -3393,12 +3419,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		if (ret)
 			goto bad;
 
-		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
+		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->tuple_size;
 		if (!cc->tag_pool_max_sectors)
 			cc->tag_pool_max_sectors = 1;
 
 		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
 			cc->tag_pool_max_sectors * cc->tuple_size);
 		if (ret) {
 			ti->error = "Cannot allocate integrity tags mempool";
 			goto bad;
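Note that the tag mempool is now sized by cc->tuple_size, the full per-sector metadata stride, rather than by the bytes dm-crypt actually consumes. That keeps pool elements in step with dm_crypt_integrity_io_alloc(), which computes tag_len from tuple_size as well; a profile whose tuples carry extra unused space simply yields fewer sectors per pool element.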
@@ -3474,6 +3500,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dm_crypt_io *io;
 	struct crypt_config *cc = ti->private;
+	unsigned max_sectors;
 
 	/*
 	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3492,9 +3519,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	/*
 	 * Check if bio is too large, split as needed.
 	 */
-	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
-	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
-		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
+	max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
+	if (unlikely(bio_sectors(bio) > max_sectors))
+		dm_accept_partial_bio(bio, max_sectors);
 
 	/*
 	 * Ensure that bio is a multiple of internal sector encryption size
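With this hunk the split threshold comes from get_max_request_size() rather than the fixed BIO_MAX_VECS cap, so reads and writes can be limited independently; plain reads without integrity tags, which the old code never split here, now default to a 128 KiB cap as well. Because the two knobs are declared with mode 0644, they should be writable at runtime under /sys/module/dm_crypt/parameters/max_read_size and max_write_size (standard module_param() behaviour; the exact paths are an inference, not something this patch states). Writing 0 restores the built-in defaults, since get_max_request_size() treats 0 as "use DM_CRYPT_DEFAULT_MAX_*_SIZE".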
@@ -3509,8 +3536,8 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
-	if (cc->on_disk_tag_size) {
-		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+	if (cc->tuple_size) {
+		unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);
 
 		if (unlikely(tag_len > KMALLOC_MAX_SIZE))
 			io->integrity_metadata = NULL;
@@ -3582,7 +3609,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
 		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
 		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
-		if (cc->on_disk_tag_size)
+		if (cc->used_tag_size)
 			num_feature_args++;
 		if (num_feature_args) {
 			DMEMIT(" %d", num_feature_args);
@@ -3598,8 +3625,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 				DMEMIT(" no_read_workqueue");
 			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
 				DMEMIT(" no_write_workqueue");
-			if (cc->on_disk_tag_size)
-				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
+			if (cc->used_tag_size)
+				DMEMIT(" integrity:%u:%s", cc->used_tag_size, cc->cipher_auth);
 			if (cc->sector_size != (1 << SECTOR_SHIFT))
 				DMEMIT(" sector_size:%d", cc->sector_size);
 			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
@@ -3621,9 +3648,9 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
 		       'y' : 'n');
 
-		if (cc->on_disk_tag_size)
+		if (cc->used_tag_size)
 			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
-			       cc->on_disk_tag_size, cc->cipher_auth);
+			       cc->used_tag_size, cc->cipher_auth);
 		if (cc->sector_size != (1 << SECTOR_SHIFT))
 			DMEMIT(",sector_size=%d", cc->sector_size);
 		if (cc->cipher_string)
@@ -3731,7 +3758,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 26, 0},
+	.version = {1, 27, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,