-
Notifications
You must be signed in to change notification settings - Fork 597
Expand file tree
/
Copy pathsha256.pil
More file actions
578 lines (531 loc) · 23.7 KB
/
sha256.pil
File metadata and controls
578 lines (531 loc) · 23.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
include "precomputed.pil";
include "gt.pil";
include "constants_gen.pil";
include "sha256_mem.pil";
/**
* This subtrace performs the Sha256 Compression function on a set of given inputs
* It is organised into 65 rows (64 compression rounds + 1 row for the output).
* All input values are assumed to be range checked to 32-bits by the calling function
* (i.e. inputs come from the sha256_mem.pil). Output values are guaranteed to be 32-bits
* by this subtrace. To simulate modulo operations we use the convention lhs_a and rhs_a to
* indicate the big-endian decomposition of a value `a`, where lhs_a is the MSB while rhs_a is the LSB.
*
* NOTE_ON_ROTATIONS
* A more detailed explanation can be found in keccakf1600.pil but we re-formulate it here for the 32-bit case
* (1) Given a positive integer a < 32 by which we want to rotate the value X by (i.e. X `rotr` a)
* (2) Decompose the value X into limbs: X = lhs_x * 2^a + rhs_x
* (3) The result of the right rotation is : Y = rhs_x * 2^(32 - a) + lhs_x
* It is sufficient to simply constrain X < 2^32, Y < 2^32 and rhs_x < 2^a (see again keccakf1600 for details)
* Given X, Y < 2^32 are constrained through their use in bitwise lookups or from being loaded in the sha256_mem trace
* We only need to actively check rhs_x < 2^a in this subtrace.
*/
namespace sha256;
// The whole subtrace can be skipped when the memory-side selector is off.
#[skippable_if]
sel = 0; // from sha256_mem.pil
// Active and error-free: gates every constraint that should only hold on real rows.
pol SEL_NO_ERR = sel * (1 - err);
// Initial state values loaded from state_offset;
pol commit init_a;
pol commit init_b;
pol commit init_c;
pol commit init_d;
pol commit init_e;
pol commit init_f;
pol commit init_g;
pol commit init_h;
// Output state values to be written to memory, they need to be "limbed" since it's addition modulo 2^32
// We could re-use the init columns but we need to reverse the round orders.
// Only the output_X_rhs is written to memory
pol commit output_a_lhs;
pol commit output_a_rhs;
pol commit output_b_lhs;
pol commit output_b_rhs;
pol commit output_c_lhs;
pol commit output_c_rhs;
pol commit output_d_lhs;
pol commit output_d_rhs;
pol commit output_e_lhs;
pol commit output_e_rhs;
pol commit output_f_lhs;
pol commit output_f_rhs;
pol commit output_g_lhs;
pol commit output_g_rhs;
pol commit output_h_lhs;
pol commit output_h_rhs;
// These are needed while we can't use constant values in lookups.
// For the XOR lookups (bitwise op_id 2 = XOR, see bitwise subtrace)
pol commit xor_sel;
perform_round * (xor_sel - 2) = 0;
// For the AND lookups (bitwise op_id 0 = AND)
pol commit and_sel;
and_sel = 0;
// We perform a compression round when we are not at a latched row AND there is no error
pol commit perform_round;
perform_round = (1 - LATCH_CONDITION) * SEL_NO_ERR;
// High only on the final (latched) row of an error-free hash, where outputs are produced.
pol commit last;
last = SEL_NO_ERR * latch;
// Counter
pol NUM_ROUNDS = 64;
pol commit rounds_remaining;
// Starts at NUM_ROUNDS on a start row and decrements by 1 on every performed round.
start * (rounds_remaining - NUM_ROUNDS) + perform_round * (rounds_remaining - rounds_remaining' - 1) = 0;
pol commit round_count;
// round_count counts up (0..63); used to look up the per-round constant k[i].
SEL_NO_ERR * (round_count - (NUM_ROUNDS - rounds_remaining)) = 0;
pol commit rounds_remaining_inv;
// latch == 1 when the rounds_remaining == 0 (standard inverse-witness zero test)
SEL_NO_ERR * (rounds_remaining * (latch * (1 - rounds_remaining_inv) + rounds_remaining_inv) - 1 + latch) = 0;
pol commit two_pow_32; // todo: remove once constants are supported in lookups
SEL_NO_ERR * (two_pow_32 - 2**32) = 0;
// Hash Values, at start must match the initial hash values
// The working registers a..h are pinned to the loaded init values only on an
// error-free start row; afterwards they evolve via the round-update constraints below.
pol commit a;
start * (1 - err) * (a - init_a) = 0;
pol commit b;
start * (1 - err) * (b - init_b) = 0;
pol commit c;
start * (1 - err) * (c - init_c) = 0;
pol commit d;
start * (1 - err) * (d - init_d) = 0;
pol commit e;
start * (1 - err) * (e - init_e) = 0;
pol commit f;
start * (1 - err) * (f - init_f) = 0;
pol commit g;
start * (1 - err) * (g - init_g) = 0;
pol commit h;
start * (1 - err) * (h - init_h) = 0;
// ========== COMPUTE W =============
// w is only computed from the 16th round onwards (as rounds 0-15 are populated from the input)
// s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3)
// s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10)
// w[i] := w[i-16] + s0 + w[i-7] + s1
// Computing the w values for each round requires the 16th, 15th, 7th and 2nd previous w values (i - 16, i - 15, i - 7 and i - 2)
// To store this in the vm, we track 16 columns that are added to by each row and we denote the following
// helper_w0 = i - 16, helper_w1 = i - 15, helper_w9 = i - 7, helper_w14 = i - 2, helper_w15 = i - 1
pol commit helper_w0, helper_w1, helper_w2, helper_w3;
pol commit helper_w4, helper_w5, helper_w6, helper_w7;
pol commit helper_w8, helper_w9, helper_w10, helper_w11;
pol commit helper_w12, helper_w13, helper_w14, helper_w15;
// Sliding window: every performed round shifts each helper down by one slot.
perform_round * (helper_w0' - helper_w1) = 0;
perform_round * (helper_w1' - helper_w2) = 0;
perform_round * (helper_w2' - helper_w3) = 0;
perform_round * (helper_w3' - helper_w4) = 0;
perform_round * (helper_w4' - helper_w5) = 0;
perform_round * (helper_w5' - helper_w6) = 0;
perform_round * (helper_w6' - helper_w7) = 0;
perform_round * (helper_w7' - helper_w8) = 0;
perform_round * (helper_w8' - helper_w9) = 0;
perform_round * (helper_w9' - helper_w10) = 0;
perform_round * (helper_w10' - helper_w11) = 0;
perform_round * (helper_w11' - helper_w12) = 0;
perform_round * (helper_w12' - helper_w13) = 0;
perform_round * (helper_w13' - helper_w14) = 0;
perform_round * (helper_w14' - helper_w15) = 0;
// The last value is given the currently computed w value
perform_round * (helper_w15' - w) = 0;
// Message schedule value w[i] for the current round (forward declared; used by the shift constraint above)
pol commit w;
// We compute w once we are past the input rounds (rounds 0-15, during which w is
// loaded from memory). This selector is therefore off for the first 16 rounds.
pol commit sel_compute_w; // Implicitly boolean based on constraint below
sel_compute_w = perform_round * (1 - sel_is_input_round);
pol COMPUTED_W = helper_w0 + w_s_0 + helper_w9 + w_s_1;
// Addition modulo 2^32: split the (up to ~2^34) sum into a carry limb and a 32-bit result.
pol commit computed_w_lhs;
pol commit computed_w_rhs;
sel_compute_w * ((computed_w_lhs * 2**32 + computed_w_rhs) - COMPUTED_W) = 0;
sel_compute_w * (w - computed_w_rhs) = 0;
// Check Modulo Add Operation: both limbs must be < 2^32 for the decomposition to be unique.
#[RANGE_COMP_W_LHS]
sel_compute_w { two_pow_32, computed_w_lhs, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_W_RHS]
sel_compute_w { two_pow_32, computed_w_rhs, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
// ========== Compute w_s0 ===================
// Rotations follow NOTE_ON_ROTATIONS: decompose by the rotation amount, swap limbs.
// w[i-15] `rotr` 7
pol commit w_15_rotr_7;
pol commit lhs_w_7;
pol commit rhs_w_7;
sel_compute_w * (helper_w1 - (lhs_w_7 * 2**7 + rhs_w_7)) = 0;
sel_compute_w * (w_15_rotr_7 - (rhs_w_7 * 2**25 + lhs_w_7)) = 0;
///// Check rhs_w_7 < 2**7 (we don't have to check lhs_w_7 < 2**25 (32 - 7), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_7; // todo: while no constants in lookup
sel_compute_w * (two_pow_7 - 2**7) = 0;
#[RANGE_RHS_W_7]
sel_compute_w { two_pow_7, rhs_w_7, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_7
// w[i-15] `rotr` 18
pol commit w_15_rotr_18;
pol commit lhs_w_18;
pol commit rhs_w_18;
sel_compute_w * (helper_w1 - (lhs_w_18 * 2**18 + rhs_w_18)) = 0;
sel_compute_w * (w_15_rotr_18 - (rhs_w_18 * 2**14 + lhs_w_18)) = 0;
///// Check rhs_w_18 < 2**18 (we don't have to check lhs_w_18 < 2**14 (32 - 18), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_18; // todo: while no constants in lookup
sel_compute_w * (two_pow_18 - 2**18) = 0;
#[RANGE_RHS_W_18]
sel_compute_w { two_pow_18, rhs_w_18, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_18
// w[i-15] `rightshift` 3 (a shift discards the low limb rather than rotating it in)
pol commit w_15_rshift_3;
pol commit lhs_w_3;
pol commit rhs_w_3;
sel_compute_w * (helper_w1 - (lhs_w_3 * 2**3 + rhs_w_3)) = 0;
sel_compute_w * (w_15_rshift_3 - lhs_w_3) = 0;
///// Check rhs_w_3 < 2**3 (we don't have to check lhs_w_3 < 2**32 (aka w_15_rshift_3) since it's used in the lookup #[W_S_0_XOR_0])
pol commit two_pow_3; // todo: while no constants in lookup
sel_compute_w * (two_pow_3 - 2**3) = 0;
#[RANGE_RHS_W_3]
sel_compute_w { two_pow_3, rhs_w_3, /*result = 1*/sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_3
// s0 := (w[i-15] `rotr` 7) `xor` (w[i-15] `rotr` 18) `xor` (w[i-15] `rightshift` 3)
// Three-way XOR is done as two chained binary XOR lookups into the bitwise subtrace.
pol commit w_15_rotr_7_xor_w_15_rotr_18;
#[W_S_0_XOR_0]
sel_compute_w { w_15_rotr_7, w_15_rotr_18, w_15_rotr_7_xor_w_15_rotr_18, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit w_s_0;
#[W_S_0_XOR_1]
sel_compute_w { w_15_rotr_7_xor_w_15_rotr_18, w_15_rshift_3, w_s_0, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ========== Compute w_s1 ===================
// Same rotation technique as w_s0, applied to w[i-2] (helper_w14).
// w[i-2] `rotr` 17
pol commit w_2_rotr_17;
pol commit lhs_w_17;
pol commit rhs_w_17;
sel_compute_w * (helper_w14 - (lhs_w_17 * 2**17 + rhs_w_17)) = 0;
sel_compute_w * (w_2_rotr_17 - (rhs_w_17 * 2**15 + lhs_w_17)) = 0;
///// Check rhs_w_17 < 2**17 (we don't have to check lhs_w_17 < 2**15 (32 - 17), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_17; // todo: while no constants in lookup
sel_compute_w * (two_pow_17 - 2**17) = 0;
#[RANGE_RHS_W_17]
sel_compute_w { two_pow_17, rhs_w_17, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_17
// w[i-2] `rotr` 19
pol commit w_2_rotr_19;
pol commit lhs_w_19;
pol commit rhs_w_19;
sel_compute_w * (helper_w14 - (lhs_w_19 * 2**19 + rhs_w_19)) = 0;
sel_compute_w * (w_2_rotr_19 - (rhs_w_19 * 2**13 + lhs_w_19)) = 0;
///// Check rhs_w_19 < 2**19 (we don't have to check lhs_w_19 < 2**13 (32 - 19), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_19; // todo: while no constants in lookup
sel_compute_w * (two_pow_19 - 2**19) = 0;
#[RANGE_RHS_W_19]
sel_compute_w { two_pow_19, rhs_w_19, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_19
// w[i-2] `rightshift` 10
pol commit w_2_rshift_10;
pol commit lhs_w_10;
pol commit rhs_w_10;
sel_compute_w * (helper_w14 - (lhs_w_10 * 2**10 + rhs_w_10)) = 0;
sel_compute_w * (w_2_rshift_10 - lhs_w_10) = 0;
///// Check rhs_w_10 < 2**10 (we don't have to check lhs_w_10 < 2**32 (aka w_2_rshift_10) since it's used in the lookup #[W_S_1_XOR_0])
pol commit two_pow_10; // todo: while no constants in lookup
sel_compute_w * (two_pow_10 - 2**10) = 0;
#[RANGE_RHS_W_10]
sel_compute_w { two_pow_10, rhs_w_10, /*result = 1*/ sel_compute_w }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_w_10
// s1 := (w[i-2] `rotr` 17) `xor` (w[i-2] `rotr` 19) `xor` (w[i-2] `rightshift` 10)
pol commit w_2_rotr_17_xor_w_2_rotr_19;
#[W_S_1_XOR_0]
sel_compute_w { w_2_rotr_17, w_2_rotr_19, w_2_rotr_17_xor_w_2_rotr_19, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// Second XOR completing s1: w_s_1 = (rotr17 ^ rotr19) ^ rshift10.
pol commit w_s_1;
// NOTE(fix): the tuple previously repeated `u32_tag` / `bitwise.tag_a` twice;
// the duplicate pair was redundant and inconsistent with every sibling XOR
// lookup (e.g. #[W_S_0_XOR_1], #[S_1_XOR_1]) which use the 5-column form.
#[W_S_1_XOR_1]
sel_compute_w { w_2_rotr_17_xor_w_2_rotr_19, w_2_rshift_10, w_s_1, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ========== START OF COMPRESSION BLOCK ==================
// ====== Computing S1 ====================
// S1 := (e `rotr` 6) `xor` (e `rotr` 11) `xor` (e `rotr` 25), per NOTE_ON_ROTATIONS.
// e `rotr` 6
pol commit e_rotr_6;
pol commit lhs_e_6;
pol commit rhs_e_6;
perform_round * (e - (lhs_e_6 * 2**6 + rhs_e_6)) = 0;
perform_round * (e_rotr_6 - (rhs_e_6 * 2**26 + lhs_e_6)) = 0;
///// Check rhs_e_6 < 2**6 (we don't have to check lhs_e_6 < 2**26 (32 - 6), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_6; // todo: while no constants in lookup
perform_round * (two_pow_6 - 2**6) = 0;
#[RANGE_RHS_E_6]
perform_round { two_pow_6, rhs_e_6, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_e_6
// e `rotr` 11
pol commit e_rotr_11;
pol commit lhs_e_11;
pol commit rhs_e_11;
perform_round * (e - (lhs_e_11 * 2**11 + rhs_e_11)) = 0;
perform_round * (e_rotr_11 - (rhs_e_11 * 2**21 + lhs_e_11)) = 0;
///// Check rhs_e_11 < 2**11 (we don't have to check lhs_e_11 < 2**21 (32 - 11), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_11; // todo: while no constants in lookup
perform_round * (two_pow_11 - 2**11) = 0;
#[RANGE_RHS_E_11]
perform_round { two_pow_11, rhs_e_11, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_e_11
// e `rotr` 25
pol commit e_rotr_25;
pol commit lhs_e_25;
pol commit rhs_e_25;
perform_round * (e - (lhs_e_25 * 2**25 + rhs_e_25)) = 0;
perform_round * (e_rotr_25 - (rhs_e_25 * 2**7 + lhs_e_25)) = 0;
///// Check rhs_e_25 < 2**25 (we don't have to check lhs_e_25 < 2**7 (32 - 25), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_25; // todo: while no constants in lookup
perform_round * (two_pow_25 - 2**25) = 0;
#[RANGE_RHS_E_25]
perform_round { two_pow_25, rhs_e_25, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_e_25
// pol S_1 = (E_0 `rotr` 6) `xor` (E_0 `rotr` 11) `xor` (E_0 `rotr` 25);
pol commit e_rotr_6_xor_e_rotr_11;
#[S_1_XOR_0]
perform_round { e_rotr_6, e_rotr_11, e_rotr_6_xor_e_rotr_11, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit s_1;
#[S_1_XOR_1]
perform_round { e_rotr_6_xor_e_rotr_11, e_rotr_25, s_1, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ==== COMPUTING CH ===========
// pol CH_0 = (E_0 `and` F_0) `xor` ((`not` E_0) `and` G_0);
pol commit e_and_f;
#[CH_AND_0]
perform_round { e, f, e_and_f, and_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit not_e;
// 32-bit bitwise NOT expressed arithmetically: not_e = (2^32 - 1) - e.
perform_round * (e + not_e - (2**32 - 1)) = 0;
pol commit not_e_and_g;
#[CH_AND_1]
perform_round { not_e, g, not_e_and_g, and_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit ch;
#[CH_XOR]
perform_round { e_and_f, not_e_and_g, ch, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ===== COMPUTING TMP 1 ===========
// Lookup round constants k[round_count] from the precomputed table.
pol commit round_constant;
#[ROUND_CONSTANT]
perform_round { round_count, round_constant }
in
precomputed.sel_sha256_compression { precomputed.clk, precomputed.sha256_compression_round_constant };
// TMP_1 is an unreduced field sum; it is reduced mod 2^32 where it feeds NEXT_A / NEXT_E below.
pol TMP_1 = h + s_1 + ch + round_constant + w;
// ===== S0 ==================
// pol S_0_0 = (A_0 `rotr` 2) `xor` (A_0 `rotr` 13) `xor` (A_0 `rotr` 22);
// a `rotr` 2
pol commit a_rotr_2;
pol commit lhs_a_2;
pol commit rhs_a_2;
perform_round * (a - (lhs_a_2 * 2**2 + rhs_a_2)) = 0;
perform_round * (a_rotr_2 - (rhs_a_2 * 2**30 + lhs_a_2)) = 0;
///// Check rhs_a_2 < 2**2 (we don't have to check lhs_a_2 < 2**30 (32 - 2), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_2; // todo: while no constants in lookup
perform_round * (two_pow_2 - 2**2) = 0;
#[RANGE_RHS_A_2]
perform_round { two_pow_2, rhs_a_2, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_a_2
// a `rotr` 13
pol commit a_rotr_13;
pol commit lhs_a_13;
pol commit rhs_a_13;
perform_round * (a - (lhs_a_13 * 2**13 + rhs_a_13)) = 0;
perform_round * (a_rotr_13 - (rhs_a_13 * 2**19 + lhs_a_13)) = 0;
///// Check rhs_a_13 < 2**13 (we don't have to check lhs_a_13 < 2**19 (32 - 13), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_13; // todo: while no constants in lookup
perform_round * (two_pow_13 - 2**13) = 0;
#[RANGE_RHS_A_13]
perform_round { two_pow_13, rhs_a_13, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_a_13
// a `rotr` 22
pol commit a_rotr_22;
pol commit lhs_a_22;
pol commit rhs_a_22;
perform_round * (a - (lhs_a_22 * 2**22 + rhs_a_22)) = 0;
perform_round * (a_rotr_22 - (rhs_a_22 * 2**10 + lhs_a_22)) = 0;
///// Check rhs_a_22 < 2**22 (we don't have to check lhs_a_22 < 2**10 (32 - 22), see NOTE_ON_ROTATIONS at the top of this file)
pol commit two_pow_22; // todo: while no constants in lookup
perform_round * (two_pow_22 - 2**22) = 0;
#[RANGE_RHS_A_22]
perform_round { two_pow_22, rhs_a_22, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
///// End Check rhs_a_22
// (A_0 `rotr` 2) `xor` (A_0 `rotr` 13)
pol commit a_rotr_2_xor_a_rotr_13;
#[S_0_XOR_0]
perform_round { a_rotr_2, a_rotr_13, a_rotr_2_xor_a_rotr_13, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit s_0;
#[S_0_XOR_1]
perform_round { a_rotr_2_xor_a_rotr_13, a_rotr_22, s_0, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ====== Computing Maj =========
// pol MAJ_0 = (A_0 `and` B_0) `xor` (A_0 `and` C_0) `xor` (B_0 `and` C_0);
// Three ANDs followed by two chained XORs, all via bitwise lookups.
pol commit a_and_b;
#[MAJ_AND_0]
perform_round { a, b, a_and_b, and_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit a_and_c;
#[MAJ_AND_1]
perform_round { a, c, a_and_c, and_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit b_and_c;
#[MAJ_AND_2]
perform_round { b, c, b_and_c, and_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit a_and_b_xor_a_and_c;
#[MAJ_XOR_0]
perform_round { a_and_b, a_and_c, a_and_b_xor_a_and_c, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
pol commit maj;
#[MAJ_XOR_1]
perform_round { a_and_b_xor_a_and_c, b_and_c, maj, xor_sel, u32_tag }
in
bitwise.start_sha256 { bitwise.acc_ia, bitwise.acc_ib, bitwise.acc_ic, bitwise.op_id, bitwise.tag_a };
// ==== Compute TMP 2 (= s_0 + maj) and the next-round state ====
// next_a = (TMP_1 + TMP_2) mod 2^32; the mod is enforced via limb decomposition + range checks.
pol NEXT_A = s_0 + maj + TMP_1;
pol commit next_a_lhs;
pol commit next_a_rhs;
perform_round * ((next_a_lhs * 2**32 + next_a_rhs) - NEXT_A) = 0;
// Check Modulo Add Operation
#[RANGE_COMP_NEXT_A_LHS]
perform_round { two_pow_32, next_a_lhs, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_NEXT_A_RHS]
perform_round { two_pow_32, next_a_rhs, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
// next_e = (d + TMP_1) mod 2^32, reduced the same way.
pol NEXT_E = d + TMP_1;
pol commit next_e_lhs;
pol commit next_e_rhs;
perform_round * ((next_e_lhs * 2**32 + next_e_rhs) - NEXT_E) = 0;
// Check Modulo Add Operation
#[RANGE_COMP_NEXT_E_LHS]
perform_round { two_pow_32, next_e_lhs, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_NEXT_E_RHS]
perform_round { two_pow_32, next_e_rhs, /*result = 1*/ perform_round }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
// Standard SHA-256 register rotation into the next row.
perform_round * (a' - next_a_rhs) = 0;
perform_round * (b' - a) = 0;
perform_round * (c' - b) = 0;
perform_round * (d' - c) = 0;
perform_round * (e' - next_e_rhs) = 0;
perform_round * (f' - e) = 0;
perform_round * (g' - f) = 0;
perform_round * (h' - g) = 0;
// TODO: These constraints could be better - we might have to reverse the order of the rounds
// Final digest: on the last (latched) row, add each working register to its
// initial value modulo 2^32. Each sum is split into lhs (carry) / rhs (32-bit
// result) limbs; only output_X_rhs is written back to memory.
pol OUT_A = a + init_a;
last * (OUT_A - (output_a_lhs * 2**32 + output_a_rhs)) = 0;
pol OUT_B = b + init_b;
last * (OUT_B - (output_b_lhs * 2**32 + output_b_rhs)) = 0;
pol OUT_C = c + init_c;
last * (OUT_C - (output_c_lhs * 2**32 + output_c_rhs)) = 0;
pol OUT_D = d + init_d;
last * (OUT_D - (output_d_lhs * 2**32 + output_d_rhs)) = 0;
pol OUT_E = e + init_e;
last * (OUT_E - (output_e_lhs * 2**32 + output_e_rhs)) = 0;
pol OUT_F = f + init_f;
last * (OUT_F - (output_f_lhs * 2**32 + output_f_rhs)) = 0;
pol OUT_G = g + init_g;
last * (OUT_G - (output_g_lhs * 2**32 + output_g_rhs)) = 0;
pol OUT_H = h + init_h;
last * (OUT_H - (output_h_lhs * 2**32 + output_h_rhs)) = 0;
// Check Modulo Add Operation for final outputs
// Both limbs of every output must be < 2^32 so the decomposition is unique
// and the rhs written to memory is a genuine 32-bit value.
#[RANGE_COMP_A_LHS]
last { two_pow_32, output_a_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_A_RHS]
last { two_pow_32, output_a_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_B_LHS]
last { two_pow_32, output_b_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_B_RHS]
last { two_pow_32, output_b_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_C_LHS]
last { two_pow_32, output_c_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_C_RHS]
last { two_pow_32, output_c_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_D_LHS]
last { two_pow_32, output_d_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_D_RHS]
last { two_pow_32, output_d_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_E_LHS]
last { two_pow_32, output_e_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_E_RHS]
last { two_pow_32, output_e_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_F_LHS]
last { two_pow_32, output_f_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_F_RHS]
last { two_pow_32, output_f_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_G_LHS]
last { two_pow_32, output_g_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_G_RHS]
last { two_pow_32, output_g_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_H_LHS]
last { two_pow_32, output_h_lhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };
#[RANGE_COMP_H_RHS]
last { two_pow_32, output_h_rhs, /*result = 1*/ last }
in
gt.sel_sha256 { gt.input_a, gt.input_b, gt.res };