   - [`verify_cell_proof`](#verify_cell_proof)
   - [`verify_cell_proof_batch`](#verify_cell_proof_batch)
 - [Reconstruction](#reconstruction)
+  - [`construct_vanishing_polynomial`](#construct_vanishing_polynomial)
+  - [`recover_shifted_data`](#recover_shifted_data)
+  - [`recover_original_data`](#recover_original_data)
   - [`recover_polynomial`](#recover_polynomial)
 
 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -508,29 +511,15 @@ def construct_vanishing_polynomial(cell_ids: Sequence[CellID],
 
 ### `recover_shifted_data`
 
-### `recover_original_data`
-
-### `recover_polynomial`
-
 ```python
-def recover_polynomial(cell_ids: Sequence[CellID],
-                       cells_bytes: Sequence[Vector[Bytes32, FIELD_ELEMENTS_PER_CELL]]) -> Polynomial:
-    """
-    Recovers a polynomial from 2 * FIELD_ELEMENTS_PER_BLOB evaluations, half of which can be missing.
-
-    This algorithm uses FFTs to recover cells faster than using Lagrange interpolation. However,
-    an even faster version, thanks to Qi Zhou, can be found here:
-    https://github.com/ethereum/research/blob/51b530a53bd4147d123ab3e390a9d08605c2cdb8/polynomial_reconstruction/polynomial_reconstruction_danksharding.py
-
-    Public method.
-    """
-    assert len(cell_ids) == len(cells_bytes)
-
-    cells = [bytes_to_cell(cell_bytes) for cell_bytes in cells_bytes]
-    assert len(cells) >= CELLS_PER_BLOB // 2
-
-    zero_poly_coeff, zero_poly_eval, zero_poly_eval_brp = construct_vanishing_polynomial(cell_ids, cells)
-
+def recover_shifted_data(cell_ids: Sequence[CellID],
+                         cells: Sequence[Cell],
+                         zero_poly_eval: Sequence[BLSFieldElement],
+                         zero_poly_coeff: Sequence[BLSFieldElement],
+                         roots_of_unity_extended: Sequence[BLSFieldElement]) -> Tuple[
+        Sequence[BLSFieldElement],
+        Sequence[BLSFieldElement],
+        BLSFieldElement]:
     extended_evaluation_rbo = [0] * (FIELD_ELEMENTS_PER_BLOB * 2)
     for cell_id, cell in zip(cell_ids, cells):
         start = cell_id * FIELD_ELEMENTS_PER_CELL
@@ -541,8 +530,6 @@ def recover_polynomial(cell_ids: Sequence[CellID],
     extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
                                       for a, b in zip(zero_poly_eval, extended_evaluation)]
 
-    roots_of_unity_extended = compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)
-
     extended_evaluations_fft = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
 
     shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY)
@@ -554,6 +541,16 @@ def recover_polynomial(cell_ids: Sequence[CellID],
     eval_shifted_extended_evaluation = fft_field(shifted_extended_evaluation, roots_of_unity_extended)
     eval_shifted_zero_poly = fft_field(shifted_zero_poly, roots_of_unity_extended)
 
+    return eval_shifted_extended_evaluation, eval_shifted_zero_poly, shift_inv
+```
+
+### `recover_original_data`
+
+```python
+def recover_original_data(eval_shifted_extended_evaluation: Sequence[BLSFieldElement],
+                          eval_shifted_zero_poly: Sequence[BLSFieldElement],
+                          shift_inv: BLSFieldElement,
+                          roots_of_unity_extended: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
     eval_shifted_reconstructed_poly = [
         div(a, b)
         for a, b in zip(eval_shifted_extended_evaluation, eval_shifted_zero_poly)
@@ -565,6 +562,39 @@ def recover_polynomial(cell_ids: Sequence[CellID],
 
     reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly, roots_of_unity_extended))
 
+    return reconstructed_data
+```
+
+### `recover_polynomial`
+
+```python
+def recover_polynomial(cell_ids: Sequence[CellID],
+                       cells_bytes: Sequence[Vector[Bytes32, FIELD_ELEMENTS_PER_CELL]]) -> Polynomial:
+    """
+    Recovers a polynomial from 2 * FIELD_ELEMENTS_PER_BLOB evaluations, half of which can be missing.
+
+    This algorithm uses FFTs to recover cells faster than using Lagrange interpolation. However,
+    an even faster version, thanks to Qi Zhou, can be found here:
+    https://github.com/ethereum/research/blob/51b530a53bd4147d123ab3e390a9d08605c2cdb8/polynomial_reconstruction/polynomial_reconstruction_danksharding.py
+
+    Public method.
+    """
+    assert len(cell_ids) == len(cells_bytes)
+
+    cells = [bytes_to_cell(cell_bytes) for cell_bytes in cells_bytes]
+    assert len(cells) >= CELLS_PER_BLOB // 2
+
+    roots_of_unity_extended = compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)
+
+    zero_poly_coeff, zero_poly_eval, zero_poly_eval_brp = construct_vanishing_polynomial(cell_ids, cells)
+
+    eval_shifted_extended_evaluation, eval_shifted_zero_poly, shift_inv = \
+        recover_shifted_data(cell_ids, cells, zero_poly_eval, zero_poly_coeff, roots_of_unity_extended)
+
+    reconstructed_data = \
+        recover_original_data(eval_shifted_extended_evaluation, eval_shifted_zero_poly, shift_inv,
+                              roots_of_unity_extended)
+
     for cell_id, cell in zip(cell_ids, cells):
         start = cell_id * FIELD_ELEMENTS_PER_CELL
         end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
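
For intuition about the split above: `recover_shifted_data` evaluates both the vanishing polynomial and its product with the received evaluations (zeros in the missing positions) on a coset shifted by `PRIMITIVE_ROOT_OF_UNITY`, so that the vanishing polynomial has no zeros on the evaluation domain and `recover_original_data` can divide pointwise before undoing the shift with `shift_inv`. The shift itself is just the identity that scaling coefficient `i` of a polynomial by `k**i` turns `p(x)` into `p(k * x)`. A minimal sketch of that identity over a toy prime field follows; the helper names and the small modulus are illustrative only, not part of the spec.

```python
# Illustrative only -- not part of the spec. Toy demonstration of the coset-shift
# identity behind `recover_shifted_data`: multiplying coefficient i of p by k**i
# yields the coefficients of p(k * x). The spec does the same over the BLS field
# with k = PRIMITIVE_ROOT_OF_UNITY, and undoes it with the returned `shift_inv`.
MODULUS = 257  # small prime stand-in for BLS_MODULUS


def shift_polynomial(coeffs, k):
    # Coefficients of p(k * x): scale coefficient i by k**i.
    return [c * pow(k, i, MODULUS) % MODULUS for i, c in enumerate(coeffs)]


def eval_poly(coeffs, x):
    # Naive evaluation of p at x modulo the toy prime.
    return sum(c * pow(x, i, MODULUS) for i, c in enumerate(coeffs)) % MODULUS


p = [3, 1, 4, 1, 5]  # arbitrary example coefficients
k, x = 7, 11
assert eval_poly(shift_polynomial(p, k), x) == eval_poly(p, k * x % MODULUS)

# Undoing the shift with the inverse factor plays the role of `shift_inv` in the spec.
k_inv = pow(k, -1, MODULUS)
assert shift_polynomial(shift_polynomial(p, k), k_inv) == p
```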