@@ -22,7 +22,7 @@
 #include "blk-cgroup.h"
 #include "blk-crypto-internal.h"
 
-static unsigned int num_prealloc_bounce_pg = 32;
+static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
 module_param(num_prealloc_bounce_pg, uint, 0);
 MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");
@@ -144,11 +144,21 @@ static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
	struct bio *src_bio = enc_bio->bi_private;
-	int i;
+	struct page **pages = (struct page **)enc_bio->bi_io_vec;
+	struct bio_vec *bv;
+	unsigned int i;
+
+	/*
+	 * Use the same trick as the alloc side to avoid the need for an
+	 * extra pages array.
+	 */
+	bio_for_each_bvec_all(bv, enc_bio, i)
+		pages[i] = bv->bv_page;
 
-	for (i = 0; i < enc_bio->bi_vcnt; i++)
-		mempool_free(enc_bio->bi_io_vec[i].bv_page,
-			     blk_crypto_bounce_page_pool);
+	i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+			      enc_bio->bi_vcnt);
+	if (i < enc_bio->bi_vcnt)
+		release_pages(pages + i, enc_bio->bi_vcnt - i);
 
	if (enc_bio->bi_status)
		cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);
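The completion side gathers the bounce pages back into the very memory that holds the biovec array before bulk-freeing them, so no separate page array ever has to be allocated. A minimal userspace sketch of that aliasing trick follows; the struct layouts are mocks of the kernel's, and note that the kernel builds with -fno-strict-aliasing, which the cast relies on:

#include <assert.h>
#include <stdio.h>

struct page;				/* opaque, as in the kernel */

struct bio_vec {			/* mock of the kernel layout */
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

int main(void)
{
	static struct bio_vec bvecs[4];	/* zero-initialized */
	struct page **pages = (struct page **)bvecs;

	/* on LP64 a bio_vec is two pointers wide */
	static_assert(PAGE_PTRS_PER_BVEC > 1, "bio_vec narrower than 2 ptrs");

	/*
	 * Gather: pages[i] lands at or before the slots bvecs[i] occupies,
	 * so each write only clobbers bvec memory that was already read.
	 */
	for (unsigned int i = 0; i < 4; i++)
		pages[i] = bvecs[i].bv_page;

	printf("%zu page pointers per bio_vec\n", PAGE_PTRS_PER_BVEC);
	return 0;
}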
@@ -157,9 +167,14 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
	bio_endio(src_bio);
 }
 
+#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))
+
 static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
-		unsigned int nr_segs)
+		unsigned int nr_segs, struct page ***pages_ret)
 {
+	unsigned int memflags = memalloc_noio_save();
+	unsigned int nr_allocated;
+	struct page **pages;
	struct bio *bio;
 
	bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
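blk_crypto_alloc_enc_bio now brackets its allocations with memalloc_noio_save()/memalloc_noio_restore(), which masks GFP_KERNEL down to GFP_NOIO for the whole scope; that is what makes the plain GFP_KERNEL passed to alloc_pages_bulk below safe in the I/O path. A short sketch of the scoped-NOIO pattern, with a hypothetical helper name:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: any allocation between memalloc_noio_save() and
 * memalloc_noio_restore() implicitly behaves as GFP_NOIO, so reclaim
 * cannot recurse into the block layer while we hold I/O resources.
 */
static void *alloc_in_io_path(size_t size)
{
	unsigned int memflags = memalloc_noio_save();
	void *p;

	p = kmalloc(size, GFP_KERNEL);	/* effectively GFP_NOIO here */

	memalloc_noio_restore(memflags);
	return p;	/* may be NULL; the caller handles failure */
}

The save/restore pair carries the previous flag state, so nested scopes compose correctly: restore only re-enables what the outer context had allowed.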
@@ -173,6 +188,30 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
	bio->bi_write_stream = bio_src->bi_write_stream;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio_clone_blkg_association(bio, bio_src);
+
+	/*
+	 * Move the page array as far up in the memory allocated for the bio
+	 * vecs as possible, so that the biovecs can be filled from the
+	 * beginning without overwriting the temporary page array.
+	 */
+	static_assert(PAGE_PTRS_PER_BVEC > 1);
+	pages = (struct page **)bio->bi_io_vec;
+	pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);
+
+	/*
+	 * Try a bulk allocation first.  It can leave random slots in the
+	 * array unallocated; mempool_alloc_bulk then fills in the holes.
+	 *
+	 * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
+	 * any non-zero slot already contains a valid allocation.
+	 */
+	memset(pages, 0, sizeof(struct page *) * nr_segs);
+	nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
+	if (nr_allocated < nr_segs)
+		mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+				   nr_segs, nr_allocated);
+	memalloc_noio_restore(memflags);
+	*pages_ret = pages;
	return bio;
 }
 
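The offset arithmetic is what makes the shared buffer safe: with PAGE_PTRS_PER_BVEC == 2 (LP64), the pages array occupies the upper half of the pointer slots behind bi_io_vec, and filling biovec i can only overwrite pages[j] for j <= i, entries the fill loop has already consumed. A small userspace illustration of the slot layout; nr_segs = 4 is an arbitrary demo value:

#include <stdio.h>

/* demo constants: two pointer slots per bio_vec, four segments */
#define PPB	2
#define NR_SEGS	4

int main(void)
{
	/* pages[] starts at slot NR_SEGS * (PPB - 1), i.e. the upper half */
	unsigned int pages_base = NR_SEGS * (PPB - 1);

	for (unsigned int i = 0; i < NR_SEGS; i++) {
		unsigned int first = i * PPB;
		unsigned int last = (i + 1) * PPB - 1;

		printf("bvec %u -> slots %u..%u", i, first, last);
		if (last >= pages_base)
			printf(", overlaps pages[..%u] (already consumed)",
			       last - pages_base);
		printf("\n");
	}
	return 0;
}

For the last biovec the overlap reaches pages[nr_segs - 1], which is exactly the entry the encrypt loop reads immediately before calling __bio_add_page, so the read-before-write ordering keeps even that case safe.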
@@ -209,6 +248,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int nr_enc_pages, enc_idx;
+	struct page **enc_pages;
	struct bio *enc_bio;
	unsigned int i;
 
@@ -231,15 +271,13 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
	 */
 new_bio:
	nr_enc_pages = min(bio_segments(src_bio), BIO_MAX_VECS);
-	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages);
+	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages, &enc_pages);
	enc_idx = 0;
	for (;;) {
		struct bio_vec src_bv =
			bio_iter_iovec(src_bio, src_bio->bi_iter);
-		struct page *enc_page;
+		struct page *enc_page = enc_pages[enc_idx];
 
-		enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
-					 GFP_NOIO);
		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
			       src_bv.bv_offset);
 
@@ -258,10 +296,8 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
		 */
		for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
-			if (crypto_skcipher_encrypt(ciph_req)) {
-				bio_io_error(enc_bio);
-				return;
-			}
+			if (crypto_skcipher_encrypt(ciph_req))
+				goto out_free_enc_bio;
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
@@ -287,6 +323,18 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
	}
 
	submit_bio(enc_bio);
+	return;
+
+out_free_enc_bio:
+	/*
+	 * Add the remaining pages to the bio so that the normal completion
+	 * path in blk_crypto_fallback_encrypt_endio frees them.  The exact
+	 * data layout does not matter for that, so don't bother iterating
+	 * the source bio.
+	 */
+	for (; enc_idx < nr_enc_pages; enc_idx++)
+		__bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
+	bio_io_error(enc_bio);
 }
 
 /*
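The new error label keeps a single teardown path: rather than freeing the unused bounce pages directly, it attaches them to enc_bio and lets bio_io_error() drive the normal endio, where the bulk free above reclaims everything. A sketch of the pattern, with a hypothetical helper name mirroring the out_free_enc_bio label:

#include <linux/bio.h>

/*
 * Hypothetical helper illustrating the single-teardown-path pattern: hand
 * every remaining resource to the object whose completion handler already
 * knows how to free it, then fail that object.
 */
static void fail_enc_bio(struct bio *enc_bio, struct page **pages,
			 unsigned int next, unsigned int total)
{
	/* attach unused bounce pages; their contents are irrelevant now */
	while (next < total)
		__bio_add_page(enc_bio, pages[next++], PAGE_SIZE, 0);

	/*
	 * Sets BLK_STS_IOERR and calls bio_endio(); the fallback's endio
	 * handler then bulk-frees every attached page.
	 */
	bio_io_error(enc_bio);
}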