  *
  * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
  */
-static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
+static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
     int global_sign;
     int skew = 0;
     int word = 0;
@@ -67,9 +67,14 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
      * and we'd lose any performance benefit. Instead, we use a technique from
      * Section 4.2 of the Okeya/Tagaki paper, which is to add either 1 (for even)
      * or 2 (for odd) to the number we are encoding, returning a skew value indicating
-     * this, and having the caller compensate after doing the multiplication. */
-
-    /* Negative numbers will be negated to keep their bit representation below the maximum width */
+     * this, and having the caller compensate after doing the multiplication.
+     *
+     * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
+     * particular, to ensure that the outputs from the endomorphism-split fit into
+     * 128 bits). If we negate, the parity of our number flips, inverting which of
+     * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
+     * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
+     * we need to special-case it in this logic. */
     flip = secp256k1_scalar_is_high(&s);
     /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
     bit = flip ^ !secp256k1_scalar_is_even(&s);
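
The skew produced here is always 1 or 2 (upstream sets `skew = 1 << bit` a few lines below this hunk), and the caller undoes it after the multiplication by subtracting skew*A in the correction block at the end of `secp256k1_ecmult_const`. As a sanity check on the parity rule, here is a minimal model of the `bit = flip ^ !is_even(s)` selection over plain ints (hypothetical names, not library code):

    #include <assert.h>

    /* Model of the skew selection: "flip" means the scalar will be negated,
     * and negation mod the (odd) group order flips parity. We want the value
     * that actually gets wnaf-encoded to be odd. */
    static int model_skew(int s_is_odd, int flip) {
        int bit = flip ^ s_is_odd; /* the same XOR as in the patch above */
        return 1 << bit;           /* adding 1 flips parity, adding 2 keeps it */
    }

    int main(void) {
        assert(model_skew(0, 0) == 1); /* even, kept:    s + 1 is odd */
        assert(model_skew(1, 0) == 2); /* odd, kept:     s + 2 is odd */
        assert(model_skew(0, 1) == 2); /* even, negated: -(s + 2) is odd */
        assert(model_skew(1, 1) == 1); /* odd, negated:  -(s + 1) is odd */
        return 0;
    }
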
@@ -88,7 +93,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {

     /* 4 */
     u_last = secp256k1_scalar_shr_int(&s, w);
-    while (word * w < WNAF_BITS) {
+    while (word * w < size) {
         int sign;
         int even;

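
Emitting one width-w digit per w bits of `size` means the loop runs ceil(size/w) times. `WNAF_SIZE_BITS` is not defined in this diff; presumably the companion macro change is the matching ceiling division, along the lines of:

    /* assumed definitions, not shown in this diff */
    #define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
    #define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

For example, with WINDOW_A = 5 as upstream defines it, size = 128 and w = WINDOW_A - 1 = 4 give 32 digits, and the wnaf arrays are declared one element longer (`1 + WNAF_SIZE(WINDOW_A - 1)`) for the extra top word written after the loop.
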
@@ -108,37 +113,44 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
     wnaf[word] = u * global_sign;

     VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
-    VERIFY_CHECK(word == WNAF_SIZE(w));
+    VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
     return skew;
 }

-
-static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar) {
+static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
     secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
     secp256k1_ge tmpa;
     secp256k1_fe Z;

     int skew_1;
-    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
 #ifdef USE_ENDOMORPHISM
     secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
     int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
     int skew_lam;
     secp256k1_scalar q_1, q_lam;
 #endif
+    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];

     int i;
     secp256k1_scalar sc = *scalar;

     /* build wnaf representation for q. */
+    int rsize = size;
 #ifdef USE_ENDOMORPHISM
-    /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
-    secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
-    skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1);
-    skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1);
-#else
-    skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1);
+    if (size > 128) {
+        rsize = 128;
+        /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
+        secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
+        skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
+        skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
+    } else
 #endif
+    {
+        skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
+#ifdef USE_ENDOMORPHISM
+        skew_lam = 0;
+#endif
+    }

     /* Calculate odd multiples of a.
      * All multiples are brought to the same Z 'denominator', which is stored
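
Callers now pass an upper bound on the scalar's bit-length, and the `size > 128` checks below enable the lambda decomposition only when the scalar is wide enough for the split to pay off. A hypothetical call site for a full-width secret scalar (illustrative, not part of this diff):

    secp256k1_gej r;
    secp256k1_ge pt;    /* the point to multiply */
    secp256k1_scalar q; /* secret scalar, up to 256 bits */
    /* ... initialize pt and q ... */
    secp256k1_ecmult_const(&r, &pt, &q, 256); /* size > 128: endomorphism split used */

Note that `size` is a public bound, not a property of the secret scalar's value, so branching on it does not undermine the constant-time guarantee.
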
@@ -152,26 +164,30 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
         secp256k1_fe_normalize_weak(&pre_a[i].y);
     }
 #ifdef USE_ENDOMORPHISM
-    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
-        secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+    if (size > 128) {
+        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+            secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+        }
     }
 #endif

     /* first loop iteration (separated out so we can directly set r, rather
      * than having it start at infinity, get doubled several times, then have
      * its new value added to it) */
-    i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)];
+    i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
     VERIFY_CHECK(i != 0);
     ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
     secp256k1_gej_set_ge(r, &tmpa);
 #ifdef USE_ENDOMORPHISM
-    i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)];
-    VERIFY_CHECK(i != 0);
-    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
-    secp256k1_gej_add_ge(r, r, &tmpa);
+    if (size > 128) {
+        i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
+        VERIFY_CHECK(i != 0);
+        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+    }
 #endif
     /* remaining loop iterations */
-    for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) {
+    for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
         int n;
         int j;
         for (j = 0; j < WINDOW_A - 1; ++j) {
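
The loop this hunk enters is a Horner-style evaluation of the wnaf digits: writing w = WINDOW_A - 1 and digits u_i, it accumulates sum_i u_i * 2^(w*i) * A as, in outline,

    r = u_top * A
    for each remaining digit u_i, from high to low:
        r = 2^w * r         /* the j-loop of w doublings */
        r = r + u_i * A     /* one table lookup plus one add */

Every digit is odd and nonzero by construction, so each iteration performs the same operation sequence regardless of the scalar, which is what makes the ladder SPA-resistant.
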
@@ -183,10 +199,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
         VERIFY_CHECK(n != 0);
         secp256k1_gej_add_ge(r, r, &tmpa);
 #ifdef USE_ENDOMORPHISM
-        n = wnaf_lam[i];
-        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
-        VERIFY_CHECK(n != 0);
-        secp256k1_gej_add_ge(r, r, &tmpa);
+        if (size > 128) {
+            n = wnaf_lam[i];
+            ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
+            VERIFY_CHECK(n != 0);
+            secp256k1_gej_add_ge(r, r, &tmpa);
+        }
 #endif
     }

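
For the `size > 128` path, both wnaf tracks share the single doubling ladder above: on secp256k1 the endomorphism acts as lambda*(x, y) = (beta*x, y) for a cube root of unity beta mod p, so

    q*A = q_1*A + q_lam*(lambda*A),  with q_1 and q_lam roughly 128 bits,

and the lambda track costs only the extra `pre_a_lam` table and one additional add per digit, while halving the number of digit iterations (rsize = 128 instead of 256).
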
@@ -206,14 +224,18 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
         secp256k1_ge_set_gej(&correction, &tmpj);
         secp256k1_ge_to_storage(&correction_1_stor, a);
 #ifdef USE_ENDOMORPHISM
-        secp256k1_ge_to_storage(&correction_lam_stor, a);
+        if (size > 128) {
+            secp256k1_ge_to_storage(&correction_lam_stor, a);
+        }
 #endif
         secp256k1_ge_to_storage(&a2_stor, &correction);

         /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
         secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
 #ifdef USE_ENDOMORPHISM
-        secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
+        if (size > 128) {
+            secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
+        }
 #endif

         /* Apply the correction */
@@ -222,10 +244,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
         secp256k1_gej_add_ge(r, r, &correction);

 #ifdef USE_ENDOMORPHISM
-        secp256k1_ge_from_storage(&correction, &correction_lam_stor);
-        secp256k1_ge_neg(&correction, &correction);
-        secp256k1_ge_mul_lambda(&correction, &correction);
-        secp256k1_gej_add_ge(r, r, &correction);
+        if (size > 128) {
+            secp256k1_ge_from_storage(&correction, &correction_lam_stor);
+            secp256k1_ge_neg(&correction, &correction);
+            secp256k1_ge_mul_lambda(&correction, &correction);
+            secp256k1_gej_add_ge(r, r, &correction);
+        }
 #endif
     }
 }
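
Unwinding the corrections: the wnaf machinery computed r = (q_1 + skew_1)*A + (q_lam + skew_lam)*(lambda*A), so the block above subtracts skew_1*A and lambda*(skew_lam*A) to recover q*A, i.e. (q + skew)*A - skew*A = q*A. The choice between subtracting A (skew 1) and 2A (skew 2) goes through `secp256k1_ge_storage_cmov` rather than a branch, so it cannot leak the scalar's parity. A minimal model of such a constant-time select on ints (illustrative only, not the library's field code):

    /* flag must be 0 or 1; returns b when flag is 1 and a when it is 0,
     * with no data-dependent branch */
    static int cmov_int(int a, int b, int flag) {
        return a ^ ((a ^ b) & -flag);
    }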