@@ -129,7 +129,8 @@ struct iv_elephant_private {
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
              DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
-             DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE };
+             DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
+             DM_CRYPT_WRITE_INLINE };
 
 enum cipher_flags {
         CRYPT_MODE_INTEGRITY_AEAD,      /* Use authenticated mode for cihper */
@@ -1919,9 +1920,32 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
         spin_unlock_irqrestore(&cc->write_thread_lock, flags);
 }
 
+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
+                                       struct convert_context *ctx)
+
+{
+        if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
+                return false;
+
+        /*
+         * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
+         * constraints so they do not need to be issued inline by
+         * kcryptd_crypt_write_convert().
+         */
+        switch (bio_op(ctx->bio_in)) {
+        case REQ_OP_WRITE:
+        case REQ_OP_WRITE_SAME:
+        case REQ_OP_WRITE_ZEROES:
+                return true;
+        default:
+                return false;
+        }
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
         struct crypt_config *cc = io->cc;
+        struct convert_context *ctx = &io->ctx;
         struct bio *clone;
         int crypt_finished;
         sector_t sector = io->sector;
@@ -1931,7 +1955,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
          * Prevent io from disappearing until this function completes.
          */
         crypt_inc_pending(io);
-        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+        crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
 
         clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
         if (unlikely(!clone)) {
@@ -1945,11 +1969,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
         sector += bio_sectors(clone);
 
         crypt_inc_pending(io);
-        r = crypt_convert(cc, &io->ctx,
+        r = crypt_convert(cc, ctx,
                           test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
         if (r)
                 io->error = r;
-        crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+        crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+        if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+                /* Wait for completion signaled by kcryptd_async_done() */
+                wait_for_completion(&ctx->restart);
+                crypt_finished = 1;
+        }
 
         /* Encryption was already finished, submit io now */
         if (crypt_finished) {
@@ -2021,10 +2050,21 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
         if (!atomic_dec_and_test(&ctx->cc_pending))
                 return;
 
-        if (bio_data_dir(io->base_bio) == READ)
+        /*
+         * The request is fully completed: for inline writes, let
+         * kcryptd_crypt_write_convert() do the IO submission.
+         */
+        if (bio_data_dir(io->base_bio) == READ) {
                 kcryptd_crypt_read_done(io);
-        else
-                kcryptd_crypt_write_io_submit(io, 1);
+                return;
+        }
+
+        if (kcryptd_crypt_write_inline(cc, ctx)) {
+                complete(&ctx->restart);
+                return;
+        }
+
+        kcryptd_crypt_write_io_submit(io, 1);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
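
Taken together, the two hunks above form a simple handshake: when the crypto backend completes asynchronously and the target is in inline-write mode, kcryptd_crypt_write_convert() blocks on ctx->restart and kcryptd_async_done() signals that completion instead of submitting the bio itself, so the bio is still issued from the original submission context and in the original order. Below is a minimal user-space model of that handshake, using a POSIX semaphore in place of the kernel's struct completion; this is an illustration only, with made-up names and timing, not dm-crypt code.

/* Build with: cc -pthread handshake.c */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t restart;                  /* stands in for ctx->restart */

/* stands in for kcryptd_async_done() running in the crypto completion path */
static void *async_done(void *arg)
{
        usleep(1000);                  /* pretend the crypto engine is busy */
        printf("async_done: request finished, signaling the submitter\n");
        sem_post(&restart);            /* models complete(&ctx->restart) */
        return NULL;
}

/* stands in for the tail of kcryptd_crypt_write_convert() */
int main(void)
{
        pthread_t t;

        sem_init(&restart, 0, 0);
        pthread_create(&t, NULL, async_done, NULL);

        printf("write_convert: crypto still pending, waiting inline\n");
        sem_wait(&restart);            /* models wait_for_completion() */
        printf("write_convert: submitting clone bio in issue order\n");

        pthread_join(t, NULL);
        sem_destroy(&restart);
        return 0;
}
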
@@ -2936,6 +2976,21 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
         return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+
+static int crypt_report_zones(struct dm_target *ti,
+                struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+        struct crypt_config *cc = ti->private;
+        sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
+
+        args->start = cc->start;
+        return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
+                                   dm_report_zones_cb, args);
+}
+
+#endif
+
 /*
  * Construct an encryption mapping:
  * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
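
The helper added above only does address translation: the sector passed in by the dm core is relative to the dm device, so it is shifted by dm_target_offset() and cc->start before being handed to blkdev_report_zones() on the underlying device, and args->start records cc->start so the generic dm_report_zones_cb() callback can shift the reported zone start sectors back into the dm device's address space. The following user-space sketch shows that arithmetic with hypothetical numbers; it assumes dm_target_offset(ti, s) evaluates to s - ti->begin, as defined in include/linux/device-mapper.h.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
        sector_t ti_begin    = 0;       /* target start inside the dm device */
        sector_t cc_start    = 524288;  /* <start> argument of the crypt table */
        sector_t next_sector = 8192;    /* args->next_sector from the dm core */

        /* forward mapping done by crypt_report_zones() */
        sector_t dev_sector = cc_start + (next_sector - ti_begin);

        /* a zone the underlying device reports at dev_zone_start ... */
        sector_t dev_zone_start = cc_start + 8192;
        /* ... should appear to the dm device shifted back by cc->start */
        sector_t dm_zone_start = dev_zone_start - cc_start + ti_begin;

        printf("query device at sector %llu, expose zone at sector %llu\n",
               (unsigned long long)dev_sector,
               (unsigned long long)dm_zone_start);
        return 0;
}
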
@@ -3069,6 +3124,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         }
         cc->start = tmpll;
 
+        /*
+         * For zoned block devices, we need to preserve the issuer write
+         * ordering. To do so, disable write workqueues and force inline
+         * encryption completion.
+         */
+        if (bdev_is_zoned(cc->dev->bdev)) {
+                set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
+                set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
+        }
+
         if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
                 ret = crypt_integrity_ctr(cc, ti);
                 if (ret)
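
The constructor comment above summarizes the whole design: on a zoned device both flags are forced, so writes are encrypted in the issuing context and, when the crypto backend completes asynchronously, the issuer waits for that completion before submitting, preserving the issue order the zoned device requires. The sketch below is a rough decision table of how the two flags combine; write_path() is a hypothetical helper for illustration only, since dm-crypt spreads this logic across the write submission path shown in the earlier hunks.

#include <stdbool.h>
#include <stdio.h>

enum { NO_WRITE_WORKQUEUE = 1 << 0, WRITE_INLINE = 1 << 1 };

/* what happens to a write bio for a given flag combination (model only) */
static const char *write_path(unsigned int flags, bool crypto_is_async)
{
        if (!(flags & NO_WRITE_WORKQUEUE))
                return "queued to kcryptd, encrypted and submitted from a worker";
        if (crypto_is_async && (flags & WRITE_INLINE))
                return "encrypted in issuer context, issuer waits, then submits";
        if (crypto_is_async)
                return "encrypted in issuer context, submitted from async callback";
        return "encrypted synchronously in issuer context, submitted immediately";
}

int main(void)
{
        unsigned int zoned = NO_WRITE_WORKQUEUE | WRITE_INLINE;

        /* zoned devices always take one of the two order-preserving paths */
        puts(write_path(zoned, true));
        puts(write_path(zoned, false));
        puts(write_path(0, true));      /* default configuration: workqueue path */
        return 0;
}
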
@@ -3358,6 +3423,10 @@ static struct target_type crypt_target = {
         .module       = THIS_MODULE,
         .ctr          = crypt_ctr,
         .dtr          = crypt_dtr,
+#ifdef CONFIG_BLK_DEV_ZONED
+        .features     = DM_TARGET_ZONED_HM,
+        .report_zones = crypt_report_zones,
+#endif
         .map          = crypt_map,
         .status       = crypt_status,
         .postsuspend  = crypt_postsuspend,