@@ -21,17 +21,47 @@
 #include "poly_kl.h"
 #include "polyvec.h"
 
-#if !defined(MLD_USE_NATIVE_NTT_CUSTOM_ORDER)
 /* This namespacing is not done at the top to avoid a naming conflict
  * with native backends, which are currently not yet namespaced. */
-#define mld_poly_permute_bitrev_to_custom \
-  MLD_NAMESPACE_KL(mld_poly_permute_bitrev_to_custom)
+#define mld_matrix_permute_bitrev_to_custom \
+  MLD_NAMESPACE_KL(mld_matrix_permute_bitrev_to_custom)
 
-static MLD_INLINE void mld_poly_permute_bitrev_to_custom(int32_t data[MLDSA_N])
+/* Helper function to ensure that the polynomial entries in the output
+ * of mld_polyvec_matrix_expand use the standard (bitreversed) ordering
+ * of coefficients.
+ * No-op unless a native backend with a custom ordering is used.
+ */
+static void mld_matrix_permute_bitrev_to_custom(mld_polyvecl mat[MLDSA_K])
+__contract__(
+  /* We don't specify that this should be a permutation, but only
+   * that it does not change the bound established at the end of
+   * mld_polyvec_matrix_expand.
+   */
+  requires(memory_no_alias(mat, MLDSA_K * sizeof(mld_polyvecl)))
+  requires(forall(k1, 0, MLDSA_K, forall(l1, 0, MLDSA_L,
+    array_bound(mat[k1].vec[l1].coeffs, 0, MLDSA_N, 0, MLDSA_Q))))
+  assigns(object_whole(mat))
+  ensures(forall(k1, 0, MLDSA_K, forall(l1, 0, MLDSA_L,
+    array_bound(mat[k1].vec[l1].coeffs, 0, MLDSA_N, 0, MLDSA_Q))))
+)
 {
-  ((void)data);
-}
+#if defined(MLD_USE_NATIVE_NTT_CUSTOM_ORDER)
+  unsigned int i, j;
+  for (i = 0; i < MLDSA_K; i++)
+  {
+    for (j = 0; j < MLDSA_L; j++)
+    {
+      mld_poly_permute_bitrev_to_custom(mat[i].vec[j].coeffs);
+    }
+  }
+
+#else /* MLD_USE_NATIVE_NTT_CUSTOM_ORDER */
+
+  /* Nothing to do */
+  ((void)mat);
+
 #endif /* !MLD_USE_NATIVE_NTT_CUSTOM_ORDER */
+}
 
 
 MLD_INTERNAL_API
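
Note: "bitrev to custom" refers to reordering a polynomial's coefficients from the standard bit-reversed NTT order into whatever custom order a native NTT backend expects. The actual permutation is backend-defined; purely as an illustration, here is a minimal sketch of the simplest such reordering, a plain bit-reversal of the MLDSA_N = 256 coefficient indices (bitrev8 and poly_permute_bitrev are hypothetical names, not functions from this codebase):

#include <stdint.h>

#define MLDSA_N 256 /* ML-DSA polynomials have 256 coefficients */

/* Reverse the 8 bits of x (256 = 2^8 indices). */
static uint8_t bitrev8(uint8_t x)
{
  x = (uint8_t)((x >> 4) | (x << 4));                   /* swap nibbles */
  x = (uint8_t)(((x & 0xCC) >> 2) | ((x & 0x33) << 2)); /* swap bit pairs */
  x = (uint8_t)(((x & 0xAA) >> 1) | ((x & 0x55) << 1)); /* swap adjacent bits */
  return x;
}

static void poly_permute_bitrev(int32_t data[MLDSA_N])
{
  unsigned i;
  for (i = 0; i < MLDSA_N; i++)
  {
    unsigned j = bitrev8((uint8_t)i);
    if (j > i) /* swap each index pair exactly once */
    {
      int32_t t = data[i];
      data[i] = data[j];
      data[j] = t;
    }
  }
}

Whatever the backend's actual order, a permutation only moves coefficients around, so it preserves the array_bound established by mld_polyvec_matrix_expand; that weaker property is all the contract above records.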
@@ -107,21 +137,7 @@ void mld_polyvec_matrix_expand(mld_polyvecl mat[MLDSA_K], |
     i++;
   }
 
-  /*
-   * The public matrix is generated in NTT domain. If the native backend
-   * uses a custom order in NTT domain, permute A accordingly. This does
-   * not affect the bounds on the coefficients, so we ignore this for CBMC
-   * to simplify proof.
-   */
-#ifndef CBMC
-  for (i = 0; i < MLDSA_K; i++)
-  {
-    for (j = 0; j < MLDSA_L; j++)
-    {
-      mld_poly_permute_bitrev_to_custom(mat[i].vec[j].coeffs);
-    }
-  }
-#endif /* !CBMC */
+  mld_matrix_permute_bitrev_to_custom(mat);
 
   /* @[FIPS204, Section 3.6.3] Destruction of intermediate values. */
   mld_zeroize(single_seed, sizeof(single_seed));
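
Note: the old code excluded the permutation loop from CBMC analysis altogether (#ifndef CBMC); calling the contracted helper instead lets the proof treat the permutation as an opaque step that preserves the coefficient bound. The contract macros follow the cbmc.h convention of this codebase; as a rough, non-authoritative sketch (the exact definitions live in the real header and may differ), they map to CBMC's function-contract primitives when CBMC is defined and vanish otherwise:

#ifdef CBMC
#define __contract__(x) x
#define requires(...) __CPROVER_requires(__VA_ARGS__)
#define ensures(...) __CPROVER_ensures(__VA_ARGS__)
#define assigns(...) __CPROVER_assigns(__VA_ARGS__)
#define object_whole(...) __CPROVER_object_whole(__VA_ARGS__)
/* forall(...) and array_bound(...) similarly wrap __CPROVER_forall-based
 * quantified predicates over array indices. */
#else /* CBMC */
#define __contract__(x) /* contracts compile away in normal builds */
#endif /* CBMC */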
@@ -160,7 +176,7 @@ void mld_polyvecl_uniform_gamma1(mld_polyvecl *v, |
   /* Safety: nonce is at most ((UINT16_MAX - MLDSA_L) / MLDSA_L), and, hence,
    * this cast is safe. See NONCE_UB comment in sign.c. */
   nonce = (uint16_t)(MLDSA_L * nonce);
-  /* Now, nonce <= UINT16_MAX - (MLDSA_L - 1), so the casts below are safe. */
+/* Now, nonce <= UINT16_MAX - (MLDSA_L - 1), so the casts below are safe. */
 #if MLDSA_L == 4
   mld_poly_uniform_gamma1_4x(&v->vec[0], &v->vec[1], &v->vec[2], &v->vec[3],
                              seed, nonce, (uint16_t)(nonce + 1),
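
Note: to spell out the arithmetic behind these safety comments: sign.c guarantees nonce <= NONCE_UB = (UINT16_MAX - MLDSA_L) / MLDSA_L before the multiplication, so MLDSA_L * nonce <= UINT16_MAX - MLDSA_L, and all of nonce, nonce + 1, ..., nonce + (MLDSA_L - 1) fit in uint16_t. For instance, with MLDSA_L == 4: NONCE_UB = 16382, 4 * 16382 = 65528, and 65528 + 3 = 65531 <= UINT16_MAX. A self-contained sanity check of that reasoning (the NONCE_UB definition below is an assumption for illustration; the authoritative bound is the NONCE_UB comment in sign.c):

#include <assert.h>
#include <stdint.h>

#define MLDSA_L 4 /* 4, 5, or 7 depending on the ML-DSA parameter set */
#define NONCE_UB ((UINT16_MAX - MLDSA_L) / MLDSA_L) /* illustrative stand-in */

static void check_nonce_casts(uint16_t nonce)
{
  unsigned k;
  assert(nonce <= NONCE_UB);           /* precondition from sign.c */
  nonce = (uint16_t)(MLDSA_L * nonce); /* <= UINT16_MAX - MLDSA_L: no wrap */
  for (k = 0; k < MLDSA_L; k++)
  {
    /* each derived nonce (uint16_t)(nonce + k) stays within uint16_t */
    assert((uint32_t)nonce + k <= UINT16_MAX);
  }
}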